repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (stringclasses, 19 values) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
marmyshev/transitions | openlp/plugins/songs/lib/mediashoutimport.py | 1 | 5385 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`mediashoutimport` module provides the functionality for importing
a MediaShout database into the OpenLP database.
"""
import pyodbc
from openlp.core.lib import translate
from openlp.plugins.songs.lib.songimport import SongImport
VERSE_TAGS = [u'V', u'C', u'B', u'O', u'P', u'I', u'E']
class MediaShoutImport(SongImport):
"""
The :class:`MediaShoutImport` class provides the ability to import the
MediaShout Access Database
"""
def __init__(self, manager, **kwargs):
"""
Initialise the MediaShout importer.
"""
SongImport.__init__(self, manager, **kwargs)
def doImport(self):
"""
Receive a single file to import.
"""
try:
conn = pyodbc.connect(u'DRIVER={Microsoft Access Driver (*.mdb)};'
u'DBQ=%s;PWD=6NOZ4eHK7k' % self.importSource)
except:
# Unfortunately no specific exception type
self.logError(self.importSource,
translate('SongsPlugin.MediaShoutImport', 'Unable to open the MediaShout database.'))
return
cursor = conn.cursor()
cursor.execute(u'SELECT Record, Title, Author, Copyright, '
u'SongID, CCLI, Notes FROM Songs ORDER BY Title')
songs = cursor.fetchall()
self.importWizard.progressBar.setMaximum(len(songs))
for song in songs:
if self.stopImportFlag:
break
cursor.execute(u'SELECT Type, Number, Text FROM Verses '
u'WHERE Record = %s ORDER BY Type, Number' % song.Record)
verses = cursor.fetchall()
cursor.execute(u'SELECT Type, Number, POrder FROM PlayOrder '
u'WHERE Record = %s ORDER BY POrder' % song.Record)
verse_order = cursor.fetchall()
cursor.execute(u'SELECT Name FROM Themes INNER JOIN SongThemes '
u'ON SongThemes.ThemeId = Themes.ThemeId '
u'WHERE SongThemes.Record = %s' % song.Record)
topics = cursor.fetchall()
cursor.execute(u'SELECT Name FROM Groups INNER JOIN SongGroups '
u'ON SongGroups.GroupId = Groups.GroupId '
u'WHERE SongGroups.Record = %s' % song.Record)
topics += cursor.fetchall()
self.processSong(song, verses, verse_order, topics)
def processSong(self, song, verses, verse_order, topics):
"""
Create the song, i.e. title, verse etc.
"""
self.setDefaults()
self.title = song.Title
self.parseAuthor(song.Author)
self.addCopyright(song.Copyright)
self.comments = song.Notes
for topic in topics:
self.topics.append(topic.Name)
if u'-' in song.SongID:
self.songBookName, self.songNumber = song.SongID.split(u'-', 1)
else:
self.songBookName = song.SongID
for verse in verses:
tag = VERSE_TAGS[verse.Type] + unicode(verse.Number) if verse.Type < len(VERSE_TAGS) else u'O'
self.addVerse(verse.Text, tag)
for order in verse_order:
if order.Type < len(VERSE_TAGS):
self.verseOrderList.append(VERSE_TAGS[order.Type] + unicode(order.Number))
self.finish()
| gpl-2.0 | -6,985,386,025,895,966,000 | 48.842593 | 106 | 0.552108 | false |
rchaber/publishbay | bayforms.py | 1 | 1690 | from wtforms import fields
from wtforms import Form
from wtforms import validators
from boilerplate.lib import utils
from webapp2_extras.i18n import lazy_gettext as _
from webapp2_extras.i18n import ngettext, gettext
from boilerplate import forms as forms
from config import utils as bayutils
FIELD_MAXLENGTH = 50 # intended to stop maliciously long input
class EditProDetails(forms.BaseForm):
display_full_name = fields.RadioField(_('Display Name'), choices=[('True', _('show your full name')), ('False', _(' - show your first name and last initial'))], coerce=unicode)
title = fields.TextField(_('Title'), [validators.Length(max=FIELD_MAXLENGTH)])
profile_visibility = fields.RadioField(_('Profile Visibility'), choices=[
('everyone', _('Anyone can see your profile whether or not they are logged into PublishBay.')),
('pb_users_only', _('Only PublishBay users who are logged in to PublishBay can see your profile.')),
('hidden', _('Clients can see your profile only if you have applied to their job.'))
])
english_level = fields.SelectField(_('English level'), choices=[1, 2, 3, 4, 5])
class EditContactInfo(forms.BaseForm):
address1 = fields.TextField(_('Address 1'), [validators.Length(max=FIELD_MAXLENGTH)])
address2 = fields.TextField(_('Address 2'), [validators.Length(max=FIELD_MAXLENGTH)])
city = fields.TextField(_('City'), [validators.Length(max=FIELD_MAXLENGTH)])
state = fields.TextField(_('State'), [validators.Length(max=FIELD_MAXLENGTH)])
zipcode = fields.TextField(_('ZIP'), [validators.Length(max=FIELD_MAXLENGTH)])
phone = fields.TextField(_('Phone'), [validators.Length(max=FIELD_MAXLENGTH)])
| lgpl-3.0 | 1,359,037,825,371,715,600 | 51.8125 | 180 | 0.714793 | false |
coderbone/SickRage-alt | sickbeard/providers/morethantv.py | 1 | 9942 | # coding=utf-8
# Author: Dustyn Gibson <[email protected]>
#
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
# Stdlib Imports
import re
# Third Party Imports
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
# First Party Imports
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickbeard.show_name_helpers import allPossibleShowNames
from sickchill.helper.common import convert_size, try_int
from sickchill.helper.exceptions import AuthException
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class MoreThanTVProvider(TorrentProvider):
def __init__(self):
# Provider Init
TorrentProvider.__init__(self, "MoreThanTV")
# Credentials
self.username = None
self.password = None
self._uid = None
self._hash = None
# Torrent Stats
self.minseed = None
self.minleech = None
self.freeleech = None
# URLs
self.url = 'https://www.morethan.tv/'
self.urls = {
'login': urljoin(self.url, 'login.php'),
'search': urljoin(self.url, 'torrents.php'),
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK']
# Cache
self.cache = tvcache.TVCache(self)
def _check_auth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
'keeplogged': '1',
'login': 'Log in',
}
response = self.get_url(self.urls['login'], post_data=login_params, returns='text')
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if re.search('Your username or password was incorrect.', response):
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None):
results = []
if not self.login():
return results
# Search Params
search_params = {
'tags_type': 1,
'order_by': 'time',
'order_way': 'desc',
'action': 'basic',
'searchsubmit': 1,
'searchstr': ''
}
# Units
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def process_column_header(td):
result = ''
if td.a and td.a.img:
result = td.a.img.get('title', td.a.get_text(strip=True))
if not result:
result = td.get_text(strip=True)
return result
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
if mode == 'Season':
searchedSeason = re.match('.*\s(Season\s\d+|S\d+)', search_string).group(1)
search_params['searchstr'] = search_string
data = self.get_url(self.urls['search'], params=search_params, returns='text')
if not data:
logger.log("No data returned from provider", logger.DEBUG)
continue
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', class_='torrent_table')
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one Release is found
if len(torrent_rows) < 2:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
labels = [process_column_header(label) for label in torrent_rows[0]('td')]
# Skip column headers
for result in torrent_rows[1:]:
try:
# skip if torrent has been nuked due to poor quality
if result.find('img', alt='Nuked'):
continue
title = result.find('a', title='View torrent').get_text(strip=True)
if mode == 'Season':
# Skip if torrent isn't the right season, we can't search
# for an exact season on MTV, it returns all of them
if searchedSeason not in title:
continue
# If torrent is grouped, we need a folder name for title
if 'Season' in title:
torrentid = urljoin(self.url, result.find('span', title='Download').parent['href'])
torrentid = re.match('.*?id=([0-9]+)', torrentid).group(1)
group_params = {
'torrentid': torrentid
}
# Obtain folder name to use as title
torrentInfo = self.get_url(self.urls['search'], params=group_params,
returns='text').replace('\n', '')
releaseregex = '.*files_{0}.*?;">/(.+?(?=/))'.format(re.escape(torrentid))
releasename = re.search(releaseregex, torrentInfo).group(1)
title = releasename
download_url = urljoin(self.url, result.find('span', title='Download').parent['href'])
if not all([title, download_url]):
continue
cells = result('td')
seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log("Discarding torrent because it doesn't meet the"
" minimum seeders or leechers: {0} (S:{1} L:{2})".format
(title, seeders, leechers), logger.DEBUG)
continue
torrent_size = cells[labels.index('Size')].get_text(strip=True)
size = convert_size(torrent_size, units=units) or -1
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format
(title, seeders, leechers), logger.DEBUG)
items.append(item)
except StandardError:
continue
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
def get_season_search_strings(self, episode):
search_string = {
'Season': []
}
for show_name in allPossibleShowNames(episode.show, season=episode.scene_season):
season_string = show_name + ' '
if episode.show.air_by_date or episode.show.sports:
season_string += str(episode.airdate).split('-')[0]
elif episode.show.anime:
# use string below if you really want to search on season with number
# season_string += 'Season ' + '{0:d}'.format(int(episode.scene_season))
season_string += 'Season' # ignore season number to get all seasons in all formats
else:
season_string += 'S{0:02d}'.format(int(episode.scene_season))
# MTV renames most season packs to just "Season ##"
mtv_season_string = '{0} Season {1}'.format(show_name, int(episode.scene_season))
search_string['Season'].append(mtv_season_string.encode('utf-8').strip())
search_string['Season'].append(season_string.encode('utf-8').strip())
return [search_string]
provider = MoreThanTVProvider()
| gpl-3.0 | -2,864,894,290,968,248,000 | 39.251012 | 141 | 0.520821 | false |
sapcc/monasca-notification | monasca_notification/plugins/hipchat_notifier.py | 1 | 4900 | # (C) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urlparse
import requests
from monasca_notification.monitoring import client
from monasca_notification.monitoring.metrics import NOTIFICATION_SEND_TIMER
from monasca_notification.plugins import abstract_notifier
"""
notification.address = https://hipchat.hpcloud.net/v2/room/<room_id>/notification?auth_token=432432
How to get access token?
1) Login to Hipchat with the user account which is used for notification
2) Go to this page. https://hipchat.hpcloud.net/account/api (Replace your hipchat server name)
3) You can see option to "Create token". Use the capability "SendNotification"
How to get the Room ID?
1) Login to Hipchat with the user account which is used for notification
2) Go to this page. https://hipchat.hpcloud.net/account/api (Replace your hipchat server name)
3) Click on the Rooms tab
4) Click on any Room of your choice.
5) Room ID is the API ID field
"""
SEVERITY_COLORS = {"low": 'green',
'medium': 'gray',
'high': 'yellow',
'critical': 'red'}
STATSD_CLIENT = client.get_client()
STATSD_TIMER = STATSD_CLIENT.get_timer()
class HipChatNotifier(abstract_notifier.AbstractNotifier):
def __init__(self, log):
super(HipChatNotifier, self).__init__("hipchat")
self._log = log
def _build_hipchat_message(self, notification):
"""Builds hipchat message body
"""
body = {'alarm_id': notification.alarm_id,
'alarm_definition_id': notification.raw_alarm['alarmDefinitionId'],
'alarm_name': notification.alarm_name,
'alarm_description': notification.raw_alarm['alarmDescription'],
'alarm_timestamp': notification.alarm_timestamp,
'state': notification.state,
'old_state': notification.raw_alarm['oldState'],
'message': notification.message,
'tenant_id': notification.tenant_id,
'metrics': notification.metrics}
hipchat_request = {}
hipchat_request['color'] = self._get_color(notification.severity.lower())
hipchat_request['message_format'] = 'text'
hipchat_request['message'] = json.dumps(body, indent=3)
return hipchat_request
def _get_color(self, severity):
return SEVERITY_COLORS.get(severity, 'purple')
@STATSD_TIMER.timed(NOTIFICATION_SEND_TIMER, dimensions={'notification_type': 'hipchat'})
def send_notification(self, notification):
"""Send the notification via hipchat
Posts on the given url
"""
hipchat_message = self._build_hipchat_message(notification)
parsed_url = urlparse.urlsplit(notification.address)
query_params = urlparse.parse_qs(parsed_url.query)
# URL without query params
url = urlparse.urljoin(notification.address, urlparse.urlparse(notification.address).path)
# Default option is to do cert verification
verify = self._config.get('insecure', False)
# If ca_certs is specified, do cert validation and ignore insecure flag
if (self._config.get("ca_certs")):
verify = self._config.get("ca_certs")
proxyDict = None
if (self._config.get("proxy")):
proxyDict = {"https": self._config.get("proxy")}
try:
# Posting on the given URL
result = requests.post(url=url,
data=hipchat_message,
verify=verify,
params=query_params,
proxies=proxyDict,
timeout=self._config['timeout'])
if result.status_code in range(200, 300):
self._log.info("Notification successfully posted.")
return True
else:
msg = "Received an HTTP code {} when trying to send to hipchat on URL {} with response {}."
self._log.error(msg.format(result.status_code, url, result.text))
return False
except Exception:
self._log.exception("Error trying to send to hipchat on URL {}".format(url))
return False
| apache-2.0 | 473,459,232,434,491,200 | 39.833333 | 107 | 0.627143 | false |
chrysante87/pyterpol | synthetic/auxiliary.py | 1 | 10363 | import numpy as np
import matplotlib.pyplot as plt
from astropy.constants import c
from scipy.interpolate import splrep
from scipy.interpolate import splev
from scipy.interpolate import bisplrep
from scipy.interpolate import bisplev
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.interpolate import spline
from scipy.signal import fftconvolve
ZERO_TOLERANCE = 1e-6
def flatten_2d(arr):
"""
Flattens 2-dim array
:param arr: 2d array
:return:
"""
newarr = []
if any([isinstance(subarr, (list, tuple)) for subarr in arr]):
for subarr in arr:
if isinstance(subarr, (tuple, list)):
newarr.extend(subarr)
else:
newarr.append(subarr)
return newarr
else:
return arr
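# A minimal usage sketch of flatten_2d; the literal values below are
# illustrative only.
def _flatten_2d_example():
    """Flatten a mixed list of scalars and sequences by one level."""
    mixed = [[1, 2], 3, (4, 5)]
    return flatten_2d(mixed)  # -> [1, 2, 3, 4, 5]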
def instrumental_broadening(wave, flux, width=0.25, width_type='fwhm', interpolate_back=True):
"""
A convolution of a spectrum with a normal distribution.
    :param wave:
    :param flux:
:param width:
:param width_type:
:return:
"""
# print "Computing instr. broadening."
# If there is no broadening to apply, don't bother
if width < ZERO_TOLERANCE:
return flux
    # Convert user input width type to sigma (standard deviation)
width_type = width_type.lower()
if width_type == 'fwhm':
sigma = width / 2.3548
elif width_type == 'sigma':
sigma = width
else:
raise ValueError(("Unrecognised width_type='{}' (must be one of 'fwhm'"
"or 'sigma')").format(width_type))
# Make sure the wavelength range is equidistant before applying the
# convolution
delta_wave = np.diff(wave).min()
range_wave = wave.ptp()
n_wave = int(range_wave / delta_wave) + 1
wave_ = np.linspace(wave[0], wave[-1], n_wave)
# flux_ = np.interp(wave_, wave, flux)
flux_ = interpolate_spec(wave, flux, wave_)
dwave = wave_[1] - wave_[0]
n_kernel = int(2 * 4 * sigma / dwave)
    # The kernel might be of too low resolution, or the wavelength range
# might be too narrow. In both cases, raise an appropriate error
if n_kernel == 0:
raise ValueError(("Spectrum resolution too low for "
"instrumental broadening (delta_wave={}, "
"width={}").format(delta_wave, width))
elif n_kernel > n_wave:
raise ValueError(("Spectrum range too narrow for "
"instrumental broadening"))
# Construct the broadening kernel
wave_k = np.arange(n_kernel) * dwave
wave_k -= wave_k[-1] / 2.
kernel = np.exp(- (wave_k) ** 2 / (2 * sigma ** 2))
kernel /= sum(kernel)
# Convolve the flux with the kernel
flux_conv = fftconvolve(1 - flux_, kernel, mode='same')
# And interpolate the results back on to the original wavelength array,
# taking care of even vs. odd-length kernels
if n_kernel % 2 == 1:
offset = 0.0
else:
offset = dwave / 2.0
if interpolate_back:
flux = np.interp(wave + offset, wave_, 1 - flux_conv, left=1, right=1)
# flux = interpolate_spec(wave_, 1-flux_conv, wave+offset)
# Return the results.
return flux
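# A minimal usage sketch of instrumental_broadening, assuming a synthetic
# Gaussian absorption line on a made-up wavelength grid (values are
# illustrative only, in Angstrom).
def _instrumental_broadening_example():
    """Broaden a synthetic line with a 0.25 Angstrom FWHM instrumental profile."""
    wave = np.linspace(6540.0, 6580.0, 2000)
    flux = 1.0 - 0.5 * np.exp(-0.5 * ((wave - 6562.8) / 0.2) ** 2)
    return instrumental_broadening(wave, flux, width=0.25, width_type='fwhm')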
def interpolate_block(x, block, xnew):
"""
Interpolates in each line of a 2d array.
:param x: independent variable
:type x: numpy.float64
:param block: 2d array for each column f(x)= block[i]
:type block: numpy.float64
:param xnew: point at which it is interpolated
:type xnew: float
:return:
"""
intens = np.zeros(len(block[0]))
n = len(block[:, 0])
# set up the order of interpolation
if n > 4:
k = 3
else:
k = n - 1
# k=3
    # TODO Can this be done faster with bisplrep and bisplev
# do the interpolation
for i in range(0, len(block[0])):
y = block[:, i]
tck = splrep(x, y, k=k)
intens[i] = splev(xnew, tck, der=0)
return intens
def interpolate_block_faster(x, block, xnew):
"""
    Interpolation of the spectra... hopefully faster?
:param x:
:param block:
:param xnew:
:return:
"""
# length of the datablock
nx = len(block[0])
ny = len(x)
# print x
if (ny > 3) & (ny < 6):
ky = 3
elif ny > 5:
ky = 5
else:
ky = ny - 1
# print ky
f = RectBivariateSpline(x, np.arange(nx), block, kx=ky, ky=1)
intens = f(xnew, np.arange(nx))[0]
return intens
def interpolate_spec(wave0, intens0, wave1):
"""
Defines a function intens0 = f(wave0) and
    then interpolates in it at wave1.
:param wave0: initial wavelength array
:type wave0: numpy.float64
:param intens0: initial intensity array
:type intens0: numpy.float64
:param wave1: wavelength array at which we interpolate
:type wave1: numpy.float64
:return intens1: final intensity array
:rtype intens1: numpy.float64
"""
tck = splrep(wave0, intens0, k=3)
intens1 = splev(wave1, tck)
return intens1
def is_within_interval(v, arr):
"""
Tests whether value v lies within interval [min(arr); max(arr)]
:param v: tested values
:type v: numpy.float64
:param arr: tested array
    :type arr: numpy.float64
    :return: True if v lies within the interval, False otherwise
    :rtype: bool
"""
# print v, max(arr), min(arr)
if (v - max(arr) > ZERO_TOLERANCE) | (min(arr) - v > ZERO_TOLERANCE):
return False
else:
return True
def generate_least_number(l):
"""
    Goes over integers in the list and finds the
smallest integer not in the list.
:param l: the list
:return: int the smallest integer
"""
num = 0
while num in l:
num += 1
return num
def keys_to_lowercase(d):
"""
Converts dictionary keys to lowercase
    :param d: the dictionary to be converted
:return: dnew
"""
dnew = {}
for key in d.keys():
keynew = key.lower()
dnew[keynew] = d[key]
return dnew
def parlist_to_list(l, property='value'):
"""
Converts a list of Parameter class to a
regular list - only the property is returned
:param l:
    :param property: the property to extract from each parameter
:return:
"""
ol = []
for par in l:
ol.append(par[property])
return ol
def sum_dict_keys(d):
"""
Sums dictionary key records.
:param d: the dictionary
:return: s the sum
"""
s = 0.0
for key in d.keys():
s += d[key]
return s
def read_text_file(f):
"""
Reads ascii file f.
:param f: the file
:type f: str
:return lines: list of all lines within file f
:rtype: list
"""
ifile = open(f, 'r')
lines = ifile.readlines()
ifile.close()
return lines
def renew_file(f):
"""
Deletes an existing file.
:param f:
:return:
"""
ofile = open(f, 'w')
ofile.close()
def rotate_spectrum(wave, intens, vrot, epsilon=0.6, interpolate_back=True):
"""
    Rotates a spectrum represented by arrays wave and intens to the projected
rotational velocity vrot.
:param wave: wavelength array
:type wave: numpy.float64
:param intens: intensity array
:type intens: numpy.float64
:param vrot: projected rotational velocity in km/s
:type vrot: float
:param epsilon: Coefficient of linear limb-darkening.
:type epsilon: float
:param interpolate_back: interpolate the spectrum back to the original wavelength sampling
:type interpolate_back: bool
    :return intens: the rotated spectrum in the original wavelength sampling
:rtype intens: numpy.float64
:return intens_conv: the rotated spectrum equidistant in rv
:rtype intens_conv: numpy.float64
:return wave_conv: the wavelength array equidistant in rv
:rtype wave_conv: numpy.float64
"""
if vrot > ZERO_TOLERANCE:
# we need it equidistant in RV
wave_log = np.log(wave)
rv = np.linspace(wave_log[0], wave_log[-1], len(wave))
step = rv[1] - rv[0]
# interpolate
intens_rv = interpolate_spec(wave_log, intens, rv)
# scale rotational velocity with light speed
vrot = 1000 * vrot / c.value
# get the kernel
# velocity vector
n = int(np.ceil(2 * vrot / step))
rv_ker = np.arange(n) * step
rv_ker = rv_ker - rv_ker[-1] / 2.
y = 1 - (rv_ker / vrot) ** 2
# the kernel
kernel = (2 * (1 - epsilon) * np.sqrt(y) + np.pi * epsilon / 2. * y) / (np.pi * vrot * (1 - epsilon / 3.0))
kernel = kernel / kernel.sum()
# convolve the flux
intens_conv = fftconvolve(1 - intens_rv, kernel, mode='same')
if n % 2 == 1:
rv = np.arange(len(intens_conv)) * step + rv[0]
else:
rv = np.arange(len(intens_conv)) * step + rv[0] - step / 2.
wave_conv = np.exp(rv)
# interpolate back
if interpolate_back:
intens = interpolate_spec(wave_conv, 1 - intens_conv, wave)
return intens
else:
return 1 - intens_conv, wave_conv
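# A minimal usage sketch of rotate_spectrum, assuming the same kind of
# synthetic line as above and a made-up projected rotational velocity of
# 50 km/s (values are illustrative only).
def _rotate_spectrum_example():
    """Apply rotational broadening (vsini = 50 km/s) to a synthetic line."""
    wave = np.linspace(6540.0, 6580.0, 2000)
    intens = 1.0 - 0.5 * np.exp(-0.5 * ((wave - 6562.8) / 0.2) ** 2)
    return rotate_spectrum(wave, intens, vrot=50.0)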
def shift_spectrum(wave, RV):
"""
Doppler-shifts spectrum.
:param wave: original wavelength array
:type wave: numpy.float64
:param RV: radial velocity in km/s
:type RV: float
:return new_wave: shifted wavelength array
:rtype new_wave: numpy.float64
"""
# shifts the wavelengths
new_wave = wave * (1 + RV * 1000 / c.value)
return new_wave
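# A small worked example of the Doppler shift above: an RV of +30 km/s moves
# a line at 6562.8 Angstrom by roughly 6562.8 * 30e3 / c ~ 0.66 Angstrom to
# the red (the numbers are illustrative only).
def _shift_spectrum_example():
    """Shift a single made-up wavelength by +30 km/s."""
    wave = np.array([6562.8])
    return shift_spectrum(wave, RV=30.0)  # ~ array([6563.46])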
def select_index_for_multiple_keywords(d, **kwargs):
"""
From a dictionary of lists selects
one index meeting all requirements.
:param kwargs:
:return:
"""
keys = d.keys()
length = len(d[keys[0]])
    for i in range(0, length):
        # return the first index where every keyword matches the stored value
        if all(d[k][i] == kwargs[k] for k in keys):
            return i
return -1
def string2bool(s):
"""
Converts string to boolean.
:param s:
:return:
"""
if s.lower() in ['true', '1']:
return True
else:
return False
def write_numpy(f, cols, fmt):
"""
An example of lack of brain of the main developer of this "code".
:param f: outputfile or handler
    :param cols: block of data to be written
    :param fmt: format of the blocks
:return: None
"""
np.savetxt(f, cols, fmt=fmt)
| gpl-2.0 | 3,257,326,317,646,513,000 | 24.09201 | 115 | 0.595098 | false |
jessamynsmith/eggtimer-server | periods/tests/management/commands/test_email_active_users.py | 1 | 2900 | import datetime
import pytz
from django.test import TestCase
from mock import patch
from periods import models as period_models
from periods.management.commands import email_active_users
from periods.tests.factories import FlowEventFactory
TIMEZONE = pytz.timezone("US/Eastern")
class TestCommand(TestCase):
def setUp(self):
self.command = email_active_users.Command()
flow_event = FlowEventFactory()
self.user = flow_event.user
FlowEventFactory(user=self.user,
timestamp=TIMEZONE.localize(datetime.datetime(2014, 2, 28)))
@patch('django.core.mail.EmailMultiAlternatives.send')
def test_email_active_users_no_periods(self, mock_send):
period_models.FlowEvent.objects.all().delete()
self.command.handle()
self.assertFalse(mock_send.called)
@patch('django.core.mail.EmailMultiAlternatives.send')
@patch('periods.models.today')
def test_email_active_users_send_disabled(self, mock_today, mock_send):
mock_today.return_value = TIMEZONE.localize(datetime.datetime(2014, 3, 14))
self.user.send_emails = False
self.user.save()
self.command.handle()
self.assertFalse(mock_send.called)
@patch('periods.email_sender.send')
@patch('periods.models.today')
def test_email_active_users(self, mock_today, mock_send):
mock_today.return_value = TIMEZONE.localize(datetime.datetime(2014, 3, 15))
self.command.handle()
email_text = ('Hello ,\n\nThis is an important notification about the data in your '
'eggtimer account.\n\nUntil now, eggtimer has been storing all data in '
'Eastern time. As you may already be aware,\nthis creates issues for users '
'in other timezones. I am going to update the application so all\ndata is '
'stored in UTC. This may affect your data!\n\nIf you are in Eastern time, '
'your data will be migrated correctly, and you need do nothing.\n\nIf you '
'have been using eggtimer from another timezone, you have two options:\n1) '
'Before July 14, edit your user profile to select your timezone. When the '
'data migration is\nperformed, I will use the timezone on your profile.\n2) '
'Do nothing, and your data will be migrated '
'as if it is in Eastern time. This will likely\nresult in a time shift when '
'you view your events. If desired, you can then edit events yourself.\n\nI '
'apologize for the inconvenience.\n\nSincerely,\n\n')
mock_send.assert_called_once_with(self.user, 'Important information about the data in your '
'eggtimer account', email_text, None)
| mit | -7,983,820,550,675,869,000 | 45.774194 | 100 | 0.637586 | false |
hmendozap/master-arbeit-projects | autosk_dev_test/utilities/test_two_GPUs_multiprocessing.py | 1 | 3587 | """ Test script that uses two GPUs, one per sub-process,
via the Python multiprocessing module. Each GPU fits a logistic regression model. """
# These imports will not trigger any theano GPU binding
from multiprocessing import Process, Manager
import numpy as np
import os
def f(shared_args,private_args):
""" Build and fit a logistic regression model. Adapted from
http://deeplearning.net/software/theano/tutorial/examples.html#a-real-example-logistic-regression
"""
# Import sandbox.cuda to bind the specified GPU to this subprocess
# then import the remaining theano and model modules.
import theano.sandbox.cuda
theano.sandbox.cuda.use(private_args['gpu'])
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
rng = np.random
    # Pull the size of the matrices from the dict of shared arguments
shared_args_dict = shared_args[0]
N = shared_args_dict['N']
feats = shared_args_dict['n_features']
D = (rng.randn(N, feats), rng.randint(size=N,low=0, high=2))
training_steps = shared_args_dict['n_steps']
# Declare Theano symbolic variables
x = T.matrix("x")
y = T.vector("y")
w = theano.shared(rng.randn(feats), name="w")
b = theano.shared(0., name="b")
print "Initial model:"
print w.get_value(), b.get_value()
# Construct Theano expression graph
p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b)) # Probability that target = 1
prediction = p_1 > 0.5 # The prediction thresholded
xent = -y * T.log(p_1) - (1-y) * T.log(1-p_1) # Cross-entropy loss function
cost = xent.mean() + 0.01 * (w ** 2).sum()# The cost to minimize
gw,gb = T.grad(cost, [w, b]) # Compute the gradient of the cost
# (we shall return to this in a
# following section of this tutorial)
# Compile. allow_input_downcast reassures the compiler that we are ok using
    # 64 bit floating point numbers on the cpu, but only 32 bit floats on the gpu.
train = theano.function(
inputs=[x,y],
outputs=[prediction, xent],
updates=((w, w - 0.1 * gw), (b, b - 0.1 * gb)), allow_input_downcast=True)
predict = theano.function(inputs=[x], outputs=prediction, allow_input_downcast=True)
# Train
for i in range(training_steps):
pred, err = train(D[0], D[1])
print "Final model:"
print w.get_value(), b.get_value()
print "target values for D:", D[1]
print "prediction on D:", predict(D[0])
if __name__ == '__main__':
# Construct a dict to hold arguments that can be shared by both processes
# The Manager class is a convenient to implement this
# See: http://docs.python.org/2/library/multiprocessing.html#managers
#
# Important: managers store information in mutable *proxy* data structures
# but any mutation of those proxy vars must be explicitly written back to the manager.
manager = Manager()
args = manager.list()
args.append({})
shared_args = args[0]
shared_args['N'] = 400
shared_args['n_features'] = 784
shared_args['n_steps'] = 10000
args[0] = shared_args
# Construct the specific args for each of the two processes
p_args = {}
q_args = {}
p_args['gpu'] = 'gpu0'
q_args['gpu'] = 'gpu1'
# Run both sub-processes
p = Process(target=f, args=(args,p_args,))
q = Process(target=f, args=(args,q_args,))
p.start()
q.start()
p.join()
q.join() | mit | 6,711,862,250,160,399,000 | 34.88 | 101 | 0.618902 | false |
CommonsDev/dataserver | projects/migrations/0014_auto__add_historicalproject.py | 1 | 11787 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'HistoricalProject'
db.create_table(u'projects_historicalproject', (
(u'id', self.gf('django.db.models.fields.IntegerField')(db_index=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('slug', self.gf('autoslug.fields.AutoSlugField')(unique_with=(), max_length=50, populate_from=None)),
('baseline', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('location_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('website', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('begin_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('end_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('progress_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('created_on', self.gf('django.db.models.fields.DateTimeField')(blank=True)),
(u'history_id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
(u'history_date', self.gf('django.db.models.fields.DateTimeField')()),
(u'history_user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL)),
(u'history_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal(u'projects', ['HistoricalProject'])
def backwards(self, orm):
# Deleting model 'HistoricalProject'
db.delete_table(u'projects_historicalproject')
models = {
u'accounts.profile': {
'Meta': {'object_name': 'Profile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'projects.historicalproject': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalProject'},
'baseline': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'location_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'progress_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'projects.project': {
'Meta': {'object_name': 'Project'},
'baseline': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['scout.Place']", 'null': 'True', 'blank': 'True'}),
'progress': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectProgress']", 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'projects.projectprogress': {
'Meta': {'ordering': "['order']", 'object_name': 'ProjectProgress'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'progress_range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectProgressRange']"})
},
u'projects.projectprogressrange': {
'Meta': {'object_name': 'ProjectProgressRange'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'})
},
u'projects.projectteam': {
'Meta': {'object_name': 'ProjectTeam'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounts.Profile']", 'symmetrical': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"})
},
u'scout.place': {
'Meta': {'object_name': 'Place'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'to': u"orm['scout.PostalAddress']"}),
'geo': ('django.contrib.gis.db.models.fields.PointField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'scout.postaladdress': {
'Meta': {'object_name': 'PostalAddress'},
'address_locality': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_office_box_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'street_address': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['projects'] | agpl-3.0 | -7,540,497,227,849,037,000 | 76.552632 | 195 | 0.562823 | false |
zhmz90/CS231N | assign/assignment1/cs231n/classifiers/softmax.py | 1 | 2626 | import numpy as np
from random import shuffle
def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# TODO: Compute the softmax loss and its gradient using explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
  num_train = X.shape[0]
  num_classes = W.shape[1]
  for i in xrange(num_train):
    z = X[i].dot(W)
    z -= np.max(z)  # shift scores for numeric stability
    z_exp = np.exp(z)
    scores = z_exp / np.sum(z_exp)
    loss += -np.log(scores[y[i]])
    for j in xrange(num_classes):
      dW[:, j] += (scores[j] - (j == y[i])) * X[i]
  # average over the batch and add L2 regularization
  # (0.5 * reg * ||W||^2, so that its gradient is simply reg * W)
  loss /= num_train
  dW /= num_train
  loss += 0.5 * reg * np.sum(W * W)
  dW += reg * W
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
"""
Softmax loss function, vectorized version.
Inputs and outputs are the same as softmax_loss_naive.
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# TODO: Compute the softmax loss and its gradient using no explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
  num_train = X.shape[0]
  scores = X.dot(W)
  scores -= np.max(scores, axis=1, keepdims=True)  # numeric stability
  exp_scores = np.exp(scores)
  probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
  loss = -np.sum(np.log(probs[np.arange(num_train), y])) / num_train
  loss += 0.5 * reg * np.sum(W * W)
  dscores = probs.copy()
  dscores[np.arange(num_train), y] -= 1
  dW = X.T.dot(dscores) / num_train + reg * W
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
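# A small sanity-check sketch with made-up random data: the naive and the
# vectorized implementations should return nearly identical losses on the
# same inputs (shapes and regularization strength are arbitrary).
def _softmax_example_check():
  rng = np.random.RandomState(0)
  W = 0.001 * rng.randn(10, 3)
  X = rng.randn(5, 10)
  y = rng.randint(3, size=5)
  loss_naive, _ = softmax_loss_naive(W, X, y, 0.1)
  loss_vec, _ = softmax_loss_vectorized(W, X, y, 0.1)
  return loss_naive, loss_vec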
| mit | 6,263,552,307,527,689,000 | 37.617647 | 79 | 0.441356 | false |
sxhexe/reaction-route-search | reactionroute_web/reaction/reaction/urls.py | 1 | 1111 | """reaction URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^search/', include('search.urls')),
url(r'^demo/', include('demo.urls')),
url(r'^$', RedirectView.as_view(url='/search/demo/')),
] # + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| mit | -4,855,968,317,904,313,000 | 40.148148 | 79 | 0.706571 | false |
Nentix/xentriq.docs | docs/conf.py | 1 | 4649 | # -*- coding: utf-8 -*-
#
# Xentriq documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 26 16:44:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Xentriq'
copyright = u'2017 - 2018 nentix.com'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'-'
# The full version, including alpha/beta/rc tags.
release = u'-'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Xentriqdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Xentriq.tex', u'Xentriq Documentation', u'', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'xentriq', u'Xentriq Documentation',
[], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Xentriq', u'Xentriq Documentation',
     u'', 'Xentriq', 'One line description of project.',
     'Miscellaneous'),
]
| mit | 6,299,663,179,183,129,000 | 29.188312 | 79 | 0.672188 | false |
warrickball/figures | hmi_rot2d_coaster.py | 1 | 2261 | #!/usr/bin/env python
import numpy as np
from matplotlib import pyplot as pl
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--figsize', type=float, nargs=2,
help="figure size, passed to rcParams['figure.figsize']")
parser.add_argument('--levels', type=int, default=20,
help="number of levels passed to contourf (default 100)")
parser.add_argument('--padding', type=float, default=0.01,
help="fractional padding between edge and circle (default=0.01)")
args = parser.parse_args()
if args.figsize:
pl.rcParams['figure.figsize'] = args.figsize
# data from SDO/HMI webpage
# http://jsoc.stanford.edu/HMI/Global_products.html
try:
rot2d = np.load('data/hmi_rot2d.npy')
err2d = np.load('data/hmi_err2d.npy')
rmesh = np.load('data/hmi_rmesh.npy')
except IOError:
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
response = urlopen('http://jsoc.stanford.edu/SUM86/D917240671/S00000/rot.2d')
rot2d = np.loadtxt(response.readlines())
response.close()
np.save('data/hmi_rot2d.npy', rot2d)
response = urlopen('http://jsoc.stanford.edu/SUM86/D917240671/S00000/err.2d')
err2d = np.loadtxt(response.readlines())
response.close()
np.save('data/hmi_err2d.npy', err2d)
response = urlopen('http://jsoc.stanford.edu/SUM86/D917240671/S00000/rmesh.orig')
rmesh = np.loadtxt(response.readlines())[::4]
response.close()
np.save('data/hmi_rmesh.npy', rmesh)
# rot2d has 49 columns, latitudes are 90-i*15/8; i starts at 0
lat = np.array([15./8.*i for i in np.arange(49)])/180.*np.pi
r, th = np.meshgrid(rmesh, lat)
ax = pl.subplot(111, projection='polar')
b = args.padding
pl.subplots_adjust(top=1-b, bottom=b, left=b, right=1-b)
data = rot2d.T[::-1]
data[err2d.T[::-1]/data>0.01] = np.nan
ax.contourf(th, r, data, args.levels)
ax.contourf(np.pi-th, r, data, args.levels)
ax.contourf(-th, r, data, args.levels)
ax.contourf(th-np.pi, r, data, args.levels)
# plot base of convection zone
th = np.linspace(0., 2.*np.pi, 401)
r = np.ones(len(th))*0.713
ax.plot(th, r, 'k--')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.grid(False)
pl.show()
| gpl-3.0 | 8,172,687,639,470,969,000 | 30.84507 | 85 | 0.670057 | false |
WaveBlocks/WaveBlocksND | WaveBlocksND/InnerProduct.py | 1 | 2782 | """The WaveBlocks Project
This file contains the interface for general quadratures.
Do not confuse quadratures with quadrature rules! Quadrature rules
are structs containing just nodes and weights and some convenience
methods. InnerProducts are classes that really can compute things
like inner products (brakets) etc.
@author: R. Bourquin
@copyright: Copyright (C) 2011, 2012, 2013 R. Bourquin
@license: Modified BSD License
"""
__all__ = ["InnerProduct", "InnerProductException"]
class InnerProduct(object):
r"""This class is an abstract interface to inner products in general.
"""
def __init__(self):
r"""General interface for quadratures.
:raise: :py:class:`NotImplementedError` Abstract interface.
"""
raise NotImplementedError("'InnerProduct' is an abstract interface.")
def __str__(self):
raise NotImplementedError("'InnerProduct' is an abstract interface.")
def get_description(self):
r"""Return a description of this inner product object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current instance. A description
never contains any data.
"""
raise NotImplementedError("'InnerProduct' is an abstract interface.")
def set_delegate(self, delegate):
r"""Set the :py:class:`Quadrature` subclass instance used for quadrature.
:param delegate: The new :py:class:`Quadrature` instance.
"""
# TODO: Allow a list of quads, one quad for each component of Psi
self._delegate = delegate
def get_delegate(self):
r"""Return the :py:class:`Quadrature` subclass instance
used for evaluation of this inner product.
:return: The current instance of the quadrature.
"""
return self._delegate
def quadrature(self):
r"""Performs the quadrature of :math:`\langle\Psi|f|\Psi\rangle` for a general
function :math:`f(x)` with :math:`x \in \mathbb{R}^D`.
Note that the arguments may vary through subclasses!
:raise: :py:class:`NotImplementedError` Abstract interface.
"""
raise NotImplementedError("'InnerProduct' is an abstract interface.")
def build_matrix(self):
r"""Calculate the matrix elements of :math:`\langle\Psi|f|\Psi\rangle`
for a general function :math:`f(x)` with :math:`x \in \mathbb{R}^D`.
Note that the arguments may vary through subclasses!
:raise: :py:class:`NotImplementedError` Abstract interface.
"""
raise NotImplementedError("'InnerProduct' is an abstract interface.")
class InnerProductException(Exception):
r"""Exception to raise in case an inner product fails for whatever reason.
"""
pass
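# A minimal sketch of how a concrete subclass is meant to look; the class
# name and the description content are illustrative only. A real subclass
# would additionally implement quadrature() and build_matrix().
class ExampleInnerProduct(InnerProduct):
    r"""Toy inner product that only wires up the delegate and the description."""

    def __init__(self, delegate=None):
        # Note: the abstract parent constructor is intentionally not called.
        self._delegate = delegate

    def __str__(self):
        return "Example inner product using " + str(self._delegate)

    def get_description(self):
        # Only key-value pairs needed to reconstruct the instance, never data.
        return {"type": "ExampleInnerProduct"}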
| bsd-3-clause | -5,012,496,759,558,997,000 | 32.518072 | 86 | 0.675413 | false |
MattDevo/edk2 | BaseTools/Source/Python/Table/TableQuery.py | 1 | 2532 | ## @file
# This file is used to create/update/query/erase table for Queries
#
# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.EdkLogger as EdkLogger
from Common.StringUtils import ConvertToSqlString
from Table.Table import Table
## TableQuery
#
# This class defined a table used for Query
#
# @param object: Inherited from object class
#
#
class TableQuery(Table):
def __init__(self, Cursor):
Table.__init__(self, Cursor)
self.Table = 'Query'
## Create table
#
# Create table Query
#
# @param ID: ID of a Query
# @param Name: Name of a Query
    #  @param Modifier:   Modifier of a Query
# @param Value: Type of a Query
# @param Model: Model of a Query
#
def Create(self):
SqlCommand = """create table IF NOT EXISTS %s(ID INTEGER PRIMARY KEY,
Name TEXT DEFAULT '',
Modifier TEXT DEFAULT '',
Value TEXT DEFAULT '',
Model INTEGER DEFAULT 0
)""" % self.Table
Table.Create(self, SqlCommand)
## Insert table
#
# Insert a record into table Query
#
# @param ID: ID of a Query
# @param Name: Name of a Query
# @param Modifier: Modifier of a Query
# @param Value: Value of a Query
# @param Model: Model of a Query
#
def Insert(self, Name, Modifier, Value, Model):
self.ID = self.ID + 1
SqlCommand = """insert into %s values(%s, '%s', '%s', '%s', %s)""" \
% (self.Table, self.ID, Name, Modifier, Value, Model)
Table.Insert(self, SqlCommand)
return self.ID
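## Usage sketch (illustrative only, not part of the original EDK2 sources).
#  Assuming the Table base class (Table/Table.py, not shown here) wraps a
#  DB-API cursor and initialises self.ID to 0, typical usage could look like:
#
#    import sqlite3
#    Cursor = sqlite3.connect(':memory:').cursor()
#    QueryTable = TableQuery(Cursor)
#    QueryTable.Create()
#    QueryId = QueryTable.Insert('SomeName', 'SomeModifier', 'SomeValue', 1)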
| bsd-2-clause | 5,991,313,910,364,187,000 | 34.695652 | 96 | 0.530016 | false |
asherbar/json-plus-plus | jpp/cli_test/cli_test.py | 1 | 3222 | import os
import shutil
import unittest
from collections import namedtuple
from io import StringIO
from jpp.cli import main as cli_entry_point
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
class TestCli(unittest.TestCase):
TMP_TEST_FILES = os.path.join(CURR_DIR, '__tmp__')
_dir_bk = None
@classmethod
def setUpClass(cls):
FileDef = namedtuple('FileDef', ('name', 'contents', 'sub_path'))
required_files = (
FileDef('compact_test.jpp', '{\n"many": 1, \n"lines": 2\n}', ''),
FileDef('main.jpp', '', ''),
FileDef('other.jpp', '', ''),
FileDef('user_input_test.jpp', '{"foo": user_input["bar"]}', ''),
FileDef('sub_main.jpp', '', ''),
FileDef('sub_other.jpp', '', 'sub_path'),
FileDef('unresolved.jpp', '{"foo": local["bar"]}', ''),
)
os.mkdir(cls.TMP_TEST_FILES)
for file_def in required_files:
if file_def.sub_path:
os.mkdir(os.path.join(cls.TMP_TEST_FILES, file_def.sub_path))
file_path = os.path.join(cls.TMP_TEST_FILES, file_def.sub_path, file_def.name)
else:
file_path = os.path.join(cls.TMP_TEST_FILES, file_def.name)
with open(file_path, 'w') as fp:
fp.write(file_def.contents)
cls._dir_bk = os.getcwd()
os.chdir(cls.TMP_TEST_FILES)
@classmethod
def tearDownClass(cls):
os.chdir(cls._dir_bk)
shutil.rmtree(cls.TMP_TEST_FILES)
def test_no_args(self):
out_file_object = StringIO()
cli_entry_point([], out_file_object)
out_file_object.seek(0)
self.assertEqual(out_file_object.read(), '{}')
def test_parse_specific_file(self):
out_file_object = StringIO()
cli_entry_point(['other.jpp'], out_file_object)
out_file_object.seek(0)
self.assertEqual(out_file_object.read(), '{}')
def test_path_option(self):
out_file_object = StringIO()
cli_entry_point(['--path', '["{}"]'.format(os.path.join(self.TMP_TEST_FILES, 'sub_path')), 'sub_main.jpp'],
out_file_object)
out_file_object.seek(0)
self.assertEqual(out_file_object.read(), '{}')
def test_compact_path(self):
out_file_object = StringIO()
cli_entry_point(['--compact-print', 'compact_test.jpp'], out_file_object)
out_file_object.seek(0)
self.assertIn(out_file_object.read(), ('{"lines":2,"many":1}', '{"many":1,"lines":2}'))
def test_user_input(self):
out_file_object = StringIO()
cli_entry_point(['--compact-print', '--user-input', '{"bar": "baz"}', 'user_input_test.jpp'],
out_file_object)
out_file_object.seek(0)
self.assertEqual(out_file_object.read(), '{"foo":"baz"}')
def test_loose_mode(self):
out_file_object = StringIO()
cli_entry_point(['--compact-print', '--loose-mode', 'unresolved.jpp'],
out_file_object)
out_file_object.seek(0)
self.assertEqual(out_file_object.read(), '{"foo":"<Local: [bar]>"}')
def main():
unittest.main()
if __name__ == '__main__':
main()
| mit | -7,050,555,940,357,168,000 | 34.8 | 115 | 0.557418 | false |
commonsense/conceptdb | conceptdb/test/test_freebase3.py | 1 | 3665 | from conceptdb.freebase_imports import MQLQuery
from conceptdb.assertion import Assertion
from conceptdb.metadata import Dataset
from mongoengine.queryset import DoesNotExist
import freebase
import conceptdb
def test_freebase_allresults():
Assertion.drop_collection()
query_args = {'id':'/en/the_beatles', 'type':'/music/artist'}
result_args = ['*']
q = MQLQuery.make(query_args, result_args)
q.get_results('/data/test')
for a in Assertion.objects:
print str(a.arguments)
print str(a.relation)
Assertion.drop_collection()
def test_freebase_resargs():
Assertion.drop_collection()
query_args = {'id':'/en/the_beatles'}
result_args = ['*']
q = MQLQuery.make(query_args, result_args)
q.get_results('/data/test')
for a in Assertion.objects:
print str(a.arguments)
print str(a.relation)
Assertion.drop_collection()
def test_get_props():
q = MQLQuery.make({'id':'/en/the_beatles','type':'/music/artist'}, ['*'])
print MQLQuery.view_props(q.query_args)
def test_get_entities():
property = 'type'
q = MQLQuery.make({'id':'/en/the_beatles','type':'/music/artist'}, ['*'])
print MQLQuery.view_entities(q.query_args, property)
def test_import_all():
Assertion.drop_collection()
q = MQLQuery.make({'id':'/en/the_beatles'}, ['*'])
assertions = q.get_results('/data/test',1,None,'nholm',True)
for a in Assertion.objects:
print a.relation
#
# mss = freebase.HTTPMetawebSession('http://api.freebase.com')
#
# query = [{"*":{},"id":"/en/the_beatles","type":"/music/artist"}]
#
# results = mss.mqlread(query)
#
# print results
Assertion.drop_collection()
def test_create_or_vote():
q = MQLQuery.make({'id':'/en/the_beatles'}, ['*'])
Assertion.drop_collection()
assertions = q.get_results('/data/test','nholm', 1,None,False)
print str(len(assertions))
assertions2 = q.get_results('/data/test','nholm', 1,None,False)
print str(len(assertions2))
count = 0
for a in Assertion.objects:
count += 1
print a.arguments
print count
Assertion.drop_collection()
def test_import_traversing():
Assertion.drop_collection()
Dataset.drop_collection()
q = MQLQuery.make({'mid':'/m/0p_47'},['*'])
# 'mid':'/m/0p_47'
q.get_results('/data/test', 'nholm', 1, None, True, 'mid')
#print 'DONE WITH GET RESULTS'
for a in Assertion.objects:
print a.relation
print a.arguments
Assertion.drop_collection()
Dataset.drop_collection()
def test_datadumpread(filename):
dump = open(filename, "r")
count = 0
for line in dump:
#print line
# ADDED: lines 0-200
if count <100:
print count
count += 1
continue
else:
print line.split()[0]
q = MQLQuery.make({'mid':line.split()[0]},['*'])
q.get_results('/data/freebase', 'nholm', 1, None, True, 'mid')
count += 1
if count > 200:
break
dump.close()
if __name__ == "__main__":
conceptdb.connect_to_mongodb('conceptdb')
print len(Assertion.objects)
prev_len = len(Assertion.objects)
test_datadumpread("freebase-simple-topic-dump.tsv")
#test_import_traversing()
print '%d assertions made.'%(len(Assertion.objects)-prev_len)
#for a in Assertion.objects:
# print a.relation
# print a.arguments
| gpl-2.0 | 1,360,744,950,240,445,700 | 23.44 | 77 | 0.580355 | false |
foursquare/fsqio | scripts/fsqio/python3-port-utils/pants/remove_builtins.py | 1 | 3393 | #!/usr/bin/env python3
import argparse
import subprocess
from pathlib import Path
from textwrap import dedent
from typing import List, Sequence, Set
def main() -> None:
folders = create_parser().parse_args().folders
for fp in get_files_with_import(folders):
remove_builtins(file_path=fp)
if safe_to_remove_future_from_build(file_path=fp):
target_name = determine_pants_target_name(file_path=fp)
update_build_dependencies(file_path=fp, pants_target_name=target_name)
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description='Remove `from builtins import x`, and possibly the BUILD entry for `future`.')
parser.add_argument('folders', nargs='*')
return parser
def get_files_with_import(folders: Sequence[str]) -> Set[Path]:
return {
fp
for folder in folders
for fp in Path(folder).rglob("*.py")
if not fp.name.endswith("__init__.py")
and "from builtins import" in fp.read_text()
}
def determine_pants_target_name(file_path: Path) -> str:
file_map = subprocess.run([
'./pants',
'filemap',
f'{file_path.parent}:'
], stdout=subprocess.PIPE, encoding="utf-8").stdout.strip().split('\n')
target_entry = next((line for line in file_map if file_path.name in line), None)
if target_entry is None:
raise SystemExit(dedent(f"""\n
ERROR: File '{file_path}' invalid. Not found anywhere in {file_path.parent}/BUILD."""))
pants_target_path = target_entry.split(' ')[1]
pants_target_name = pants_target_path.split(':')[1]
return pants_target_name
def remove_builtins(*, file_path: Path) -> None:
lines = file_path.read_text().splitlines()
builtins_line_index = next(
(i for i, line in enumerate(lines) if "from builtins" in line), None
)
    if builtins_line_index is not None:
lines.pop(builtins_line_index)
file_path.write_text("\n".join(lines) + "\n")
def safe_to_remove_future_from_build(*, file_path: Path) -> bool:
lines = file_path.read_text().splitlines()
return all(
"from future.utils" not in line and
"from future.moves" not in line
for line in lines
)
def _find_target_index_in_build(
*, build_lines: List[str], pants_target_name: str, file_name: str
) -> int:
index = next((i for i, line in enumerate(build_lines)
if f"name = '{pants_target_name}'" in line
or f"name='{pants_target_name}'" in line),
None)
if index is None: # mono-target
index = next((i for i, line in enumerate(build_lines) if file_name in line), None)
if index is None: # only one target block in file, and sources aren't specified
index = next(i for i, line in enumerate(build_lines) if 'python_' in line and '(' in line)
return index
def update_build_dependencies(*, file_path: Path, pants_target_name: str) -> None:
build_file: Path = file_path.parent / "BUILD"
lines = build_file.read_text().splitlines()
target_index = _find_target_index_in_build(
build_lines=lines, pants_target_name=pants_target_name, file_name=file_path.name
)
future_line_index = next(
(i for i, line in enumerate(lines[target_index:]) if '3rdparty/python:future' in line), None
)
    if future_line_index is not None:
lines.pop(future_line_index + target_index)
build_file.write_text("\n".join(lines) + "\n")
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| apache-2.0 | -7,298,757,472,970,914,000 | 31.941748 | 96 | 0.666667 | false |
useblocks/groundwork | groundwork/plugins/gw_recipes_builder.py | 1 | 3282 | # -*- coding: utf-8 -*-
import os
from click import Argument
from groundwork.patterns import GwCommandsPattern, GwRecipesPattern
class GwRecipesBuilder(GwCommandsPattern, GwRecipesPattern):
"""
Provides commands for listing and building recipes via command line interface.
Provided commands:
* recipe_list
* recipe_build
Provides also the recipe **gw_package**, which can be used to setup a groundwork related python package.
Content of the package:
* setup.py: Preconfigured and ready to use.
* groundwork package structure: Directories for applications, patterns, plugins and recipes.
* Simple, runnable example of a groundwork application and plugins.
* usable test, supported by py.test and tox.
* expandable documentation, supported by sphinx and the groundwork sphinx template.
* .gitignore
    This code is largely based on Cookiecutter's main.py file:
https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/main.py
"""
def __init__(self, *args, **kwargs):
self.name = kwargs.get("name", self.__class__.__name__)
super(GwRecipesBuilder, self).__init__(*args, **kwargs)
def activate(self):
self.commands.register("recipe_list", "Lists all recipes", self._recipe_list)
self.commands.register("recipe_build", "Builds a given recipe", self._recipe_build,
params=[Argument(("recipe",), required=True)])
self.recipes.register("gw_package",
os.path.abspath(os.path.join(os.path.dirname(__file__), "../recipes/gw_package")),
description="Groundwork basic package. Includes places for "
"apps, plugins, patterns and recipes.",
final_words="Recipe Installation is done.\n\n"
"During development use buildout:\n"
"Run: python bootstrap.py\n"
"Then: bin/buildout\n"
"Start the app: bin/app\n\n"
"For installation run: 'python setup.py install' \n"
"For documentation run: 'make html' inside doc folder "
"(after installation!)\n\n"
"For more information, please take a look into the README file "
"to know how to go on.\n"
"For help visit: https://groundwork.readthedocs.io\n\n"
"Have fun with your groundwork package.")
def deactivate(self):
pass
def _recipe_list(self):
print("Recipes:")
for key, recipe in self.app.recipes.get().items():
print(" %s by plugin '%s' - %s" % (recipe.name, recipe.plugin.name, recipe.description))
def _recipe_build(self, recipe):
recipe_obj = self.app.recipes.get(recipe)
if recipe_obj is None:
print("Recipe %s not found." % recipe)
else:
recipe_obj.build(no_input=False, extra_context=None)
| mit | -8,096,438,655,980,942,000 | 45.885714 | 112 | 0.555454 | false |
theY4Kman/neoalchemy | neoalchemy/util/langhelpers.py | 1 | 2116 | from . import compat
class _symbol(int):
def __new__(self, name, doc=None, canonical=None):
"""Construct a new named symbol."""
assert isinstance(name, compat.string_types)
if canonical is None:
canonical = hash(name)
v = int.__new__(_symbol, canonical)
v.name = name
if doc:
v.__doc__ = doc
return v
def __reduce__(self):
return symbol, (self.name, "x", int(self))
def __str__(self):
return repr(self)
def __repr__(self):
return "symbol(%r)" % self.name
_symbol.__name__ = 'symbol'
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
    symbol('foo')
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
The optional ``doc`` argument assigns to ``__doc__``. This
is strictly so that Sphinx autoattr picks up the docstring we want
(it doesn't appear to pick up the in-module docstring if the datamember
is in a different module - autoattribute also blows up completely).
If Sphinx fixes/improves this then we would no longer need
``doc`` here.
"""
symbols = {}
_lock = compat.threading.Lock()
def __new__(cls, name, doc=None, canonical=None):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name, doc, canonical)
return sym
finally:
symbol._lock.release()
_creation_order = 1
def set_creation_order(instance):
"""Assign a '_creation_order' sequence to the given instance.
This allows multiple instances to be sorted in order of creation
(typically within a single thread; the counter is not particularly
threadsafe).
"""
global _creation_order
instance._creation_order = _creation_order
_creation_order += 1
NoneType = type(None)
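# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): demonstrates the
# singleton behaviour of symbol() and the ordering that set_creation_order()
# provides. The Thing class is hypothetical and exists only for this demo;
# the function is never called automatically.
def _symbol_demo():
    FOO = symbol("FOO", doc="example flag")
    assert FOO is symbol("FOO")           # repeated calls return the same object
    assert repr(FOO) == "symbol('FOO')"

    class Thing(object):
        def __init__(self):
            set_creation_order(self)

    first, second = Thing(), Thing()
    assert first._creation_order < second._creation_order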
| mit | 479,101,455,911,698,600 | 25.78481 | 75 | 0.606333 | false |
gaetano-guerriero/eyeD3-debian | src/eyed3/__init__.py | 1 | 1368 | # -*- coding: utf-8 -*-
import sys
import locale
from .__about__ import __version__ as version
_DEFAULT_ENCODING = "latin1"
LOCAL_ENCODING = locale.getpreferredencoding(do_setlocale=True)
"""The local encoding, used when parsing command line options, console output,
etc. The default is always ``latin1`` if it cannot be determined; it is NOT
the value shown."""
if not LOCAL_ENCODING or LOCAL_ENCODING == "ANSI_X3.4-1968": # pragma: no cover
LOCAL_ENCODING = _DEFAULT_ENCODING
LOCAL_FS_ENCODING = sys.getfilesystemencoding()
"""The local file system encoding, the default is ``latin1`` if it cannot be
determined."""
if not LOCAL_FS_ENCODING: # pragma: no cover
LOCAL_FS_ENCODING = _DEFAULT_ENCODING
class Error(Exception):
"""Base exception type for all eyed3 errors."""
def __init__(self, *args):
super(Error, self).__init__(*args)
if args:
# The base class will do exactly this if len(args) == 1,
# but not when > 1. Note, the 2.7 base class will, 3 will not.
# Make it so.
self.message = args[0]
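# Illustrative note (not in the original): with the constructor above,
# ``Error("boom", 1).message == "boom"`` regardless of Python version or
# argument count, which is exactly what the comment above is ensuring.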
from .utils.log import log # noqa: E402
from .core import load # noqa: E402
del sys
del locale
__all__ = ["log", "load", "version", "LOCAL_ENCODING", "LOCAL_FS_ENCODING",
"Error"]
| gpl-3.0 | -6,633,266,518,445,879,000 | 34.076923 | 80 | 0.614766 | false |
virantha/photokeeper | photokeeper/flickr.py | 1 | 10779 | # -*- coding: utf-8 -*-
# Copyright 2016 Virantha Ekanayake All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
import logging
import yaml, pprint
import flickrapi
import urllib.request
from xml.etree import ElementTree
from tqdm import tqdm
import itertools, dateparser, time
from photokeeper.target import TargetBase
class FileWithCallback(object):
def __init__(self, filename):
self.file = open(filename, 'rb')
# the following attributes and methods are required
self.len = os.path.getsize(filename)
self.fileno = self.file.fileno
self.tell = self.file.tell
self.tqdm = tqdm(total=self.len, ncols=60,unit_scale=True, unit='B')
def read(self, size):
self.tqdm.update(size)
return self.file.read(size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.tqdm.close()
class FlickrMedia(object):
def __init__(self, json_dict):
self.json_dict = json_dict
self.title = json_dict['title']
self.photoid = json_dict['id']
dt = json_dict['datetaken']
self.datetime_taken = dateparser.parse(dt, date_formats=['%Y-%m-%d %H:%M:%S'])
class PhotoSet(object):
def __init__(self, json_dict):
self.json_dict = json_dict
self.title = json_dict['title']['_content']
self.setid = json_dict['id']
self.photos = None
class Photo(object):
def __init__(self, photo_element):
"""Construct a photo object out of the XML response from Flickr"""
attrs = { 'farm': 'farmid', 'server':'serverid','id':'photoid','secret':'secret'}
for flickr_attr, py_attr in attrs.items():
setattr(self, py_attr, photo_element.get(flickr_attr))
def _construct_flickr_url(self):
url = "http://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (self.farmid,self.serverid, self.photoid, self.secret)
return url
def download_photo(self, dirname, cache=False, tgt_filename=None):
if not os.path.exists(dirname):
os.makedirs(dirname)
tgt = os.path.join(dirname, "%s.jpg" % self.photoid)
if cache:
if os.path.isfile(tgt):
return tgt
urllib.request.urlretrieve(self._construct_flickr_url(), tgt)
return tgt
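# Worked example (illustrative only): a Photo whose XML element carries
# farm="5", server="4040", id="123456" and secret="abc123" resolves to
# http://farm5.staticflickr.com/4040/123456_abc123_b.jpg
# via _construct_flickr_url() above.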
class Flickr(TargetBase):
def __init__(self):
self.set_keys(*self.read_keys())
self.get_auth2()
# Might as well get all the photosets at this point as we'll need them
self.photosets = self._get_photosets()
def read_keys(self):
"""
Read the flickr API key and secret from a local file
"""
with open("flickr_api.yaml") as f:
api = yaml.load(f)
return (api["key"], api["secret"])
def set_keys(self, key, secret):
self.api_key = key
self.api_secret = secret
def get_auth2(self):
print("Authenticating to Flickr")
self.flickr = flickrapi.FlickrAPI(self.api_key, self.api_secret)
self.flickr.authenticate_via_browser(perms='write')
print("Authentication succeeded")
return
def get_tagged(self, tags, count, download_dir="photos"):
""" Get photos with the given list of tags
"""
print ("connecting to flickr, and getting %d photos with tags %s" % (count, tags))
x = self.flickr.photos_search(api_key = self.api_key, user_id="me", tags=','.join(tags), per_page=count)
photos = self._extract_photos_from_xml(x)
photo_filenames = self._sync_photos(photos, download_dir)
print("Found %d photos" % len(photos))
return photo_filenames
def _sync_photos(self, photos, download_dir="photos", clean_up=False):
"""
Connect to flickr, and for each photo in the list, download.
        Then, if clean_up is set, delete local photos that weren't present in the list of photos.
:returns: List of filenames downloaded
"""
photo_filenames = []
photo_count = len(photos)
for i,photo in enumerate(photos):
print("[%d/%d] Downloading %s from flickr" % (i,photo_count,photo.photoid))
filename = photo.download_photo(download_dir, cache=True)
photo_filenames.append(filename)
# Now, go through and clean up directory if required
if clean_up:
photo_file_list = ["%s.jpg" % (x.photoid) for x in photos]
for fn in os.listdir(download_dir):
full_fn = os.path.join(download_dir, fn)
if os.path.isfile(full_fn):
if not fn in photo_file_list:
print ("Flickr sync: Deleting file %s" % fn)
os.remove(full_fn)
return photo_filenames
def _extract_photos_from_xml(self, xml):
photos = []
for i in xml.iter():
if i.tag == 'rsp':
# the response header. stat member should be 'ok'
if i.get('stat') == 'ok':
continue
else:
# error, so just break
break
if i.tag == 'photo':
photos.append(Photo(i))
return photos
def get_recent(self,count, download_dir="photos"):
""" get the most recent photos
"""
print ("connecting to flickr, and getting most recent %d photos" % count)
x = self.flickr.people_getphotos(api_key = self.api_key, user_id="me",per_page=count)
#x = self.flickr.photos_search(api_key=self.api_key,"me")
photos = self._extract_photos_from_xml(x)
photo_filenames = self._sync_photos(photos, download_dir)
return photo_filenames
def _get_photosets(self):
print("Getting photosets from Flickr")
resp = self.flickr.photosets.getList(format='parsed-json')
photosets = {}
for photoset in resp['photosets']['photoset']:
p = PhotoSet(photoset)
photosets[p.title] = p #TODO: Possible issue here because multiple photosets could have same title. Oh well
return photosets
def _get_photos_in_album(self, album_name, cached=False):
photoset = self.photosets[album_name]
albumid = photoset.setid
if not photoset.photos or not cached:
resp = self.flickr.photosets.getPhotos(photoset_id=albumid, extras='date_taken', format='parsed-json')
photos = {}
for p in resp['photoset']['photo']:
myphoto = FlickrMedia(p)
photos[myphoto.title] = myphoto
photoset.photos = photos
return photoset.photos
def _upload_file(self, filename):
with FileWithCallback(filename) as f:
resp = self.flickr.upload(filename=filename, fileobj=f, is_public=0)
photoid = resp.find('photoid').text
return photoid
def _create_new_album(self, album_name, first_photo_filename):
# First, we need to upload a dummy photo
photoid = self._upload_file(first_photo_filename)
resp = self.flickr.photosets.create(title=album_name, primary_photo_id=photoid, format='parsed-json')
albumid = resp['photoset']['id']
resp = self.flickr.photosets.getInfo(photoset_id=albumid, format='parsed-json')
return (photoid, resp['photoset'])
def _add_photo_to_album(self, photoid, albumid):
#tqdm.write("Adding {} to {} ".format(photoid, albumid))
self.flickr.photosets.addPhoto(photoset_id=albumid, photo_id=photoid)
def _is_duplicate(self, image):
album_name = image.tgtdatedir
if not album_name in self.photosets:
return False
else:
photos = self._get_photos_in_album(album_name, cached=True)
image_title = os.path.basename(image.filename)
if not image_title in photos: # If photo with same title is not found, then no duplicates
return False
else:
# Same title, but let's check the date too, to be sure
#tqdm.write('{} has local date {}, and flickr date {}'.format(image_title, image.datetime_taken, photos[image_title].datetime_taken))
if photos[image_title].datetime_taken != image.datetime_taken:
return False
else:
return True
def check_duplicates(self, images):
print("Checking for duplicates in Flickr")
images_1, images_2 = itertools.tee(images)
for total,img in enumerate(images_1):
if self._is_duplicate(img):
img.flickr_dup = True
n_dups = [i for i in images_2 if i.flickr_dup]
print('Found {} duplicates out of {} images'.format(len(n_dups), total+1))
def execute_copy(self, images):
for img in images:
if img.flickr_dup: continue
album_name = img.tgtdatedir
if album_name not in self.photosets:
# Need to create album
tqdm.write('Creating new album %s' % album_name)
photoid, album_dict = self._create_new_album(album_name, img.srcpath)
p = PhotoSet(album_dict)
self.photosets[p.title] = p
else:
photoid = self._upload_file(img.srcpath)
self._add_photo_to_album(photoid, self.photosets[album_name].setid)
tqdm.write("Adding {} to {} ".format(img.filename, album_name))
# Now, make sure we set the date-taken manually if no exif information
if img.exif_timestamp_missing:
dt = img.datetime_taken.strftime('%Y-%m-%d %H:%M:%S')
tqdm.write('Manually setting date on video {} to {}'.format(img.filename, dt))
self.flickr.photos.setDates(photo_id=photoid, date_taken=dt)
def main():
#logging.basicConfig(level=logging.DEBUG, format='%(message)s')
script = Flickr()
#script.get_recent(10)
#script.upload('test.jpg')
script.flickr.photos.setDates(photoid='30735623495', date_taken='2016-06-24 10:12:02')
if __name__ == '__main__':
main()
| apache-2.0 | 7,724,130,514,751,252,000 | 35.788396 | 149 | 0.598386 | false |
ufal/neuralmonkey | neuralmonkey/tests/test_model_part.py | 1 | 3306 | #!/usr/bin/env python3.5
"""Test ModelPart class."""
import os
import tempfile
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import tensorflow as tf
from neuralmonkey.vocabulary import Vocabulary
from neuralmonkey.encoders.recurrent import SentenceEncoder
from neuralmonkey.model.sequence import EmbeddedSequence
class Test(unittest.TestCase):
"""Test capabilities of model part."""
@classmethod
def setUpClass(cls):
tf.reset_default_graph()
cls.dataset = {
"id": tf.constant([["hello", "world"], ["test", "this"]]),
"data_id": tf.constant([["A", "B", "C"], ["D", "E", "F"]])}
def test_reuse(self):
vocabulary = Vocabulary(["a", "b"])
seq1 = EmbeddedSequence(
name="seq1",
vocabulary=vocabulary,
data_id="id",
embedding_size=10)
seq1.register_input(self.dataset)
seq2 = EmbeddedSequence(
name="seq2",
vocabulary=vocabulary,
embedding_size=10,
data_id="id")
seq2.register_input(self.dataset)
seq3 = EmbeddedSequence(
name="seq3",
vocabulary=vocabulary,
data_id="id",
embedding_size=10,
reuse=seq1)
seq3.register_input(self.dataset)
# blessing
self.assertIsNotNone(seq1.embedding_matrix)
self.assertIsNotNone(seq2.embedding_matrix)
self.assertIsNotNone(seq3.embedding_matrix)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
params = sess.run((seq1.embedding_matrix, seq2.embedding_matrix,
seq3.embedding_matrix))
with self.assertRaises(AssertionError):
assert_array_equal(params[0], params[1])
assert_array_equal(params[0], params[2])
def test_save_and_load(self):
"""Try to save and load encoder."""
vocabulary = Vocabulary(["a", "b"])
checkpoint_file = tempfile.NamedTemporaryFile(delete=False)
checkpoint_file.close()
encoder = SentenceEncoder(
name="enc", vocabulary=vocabulary, data_id="data_id",
embedding_size=10, rnn_size=20, max_input_len=30,
save_checkpoint=checkpoint_file.name,
load_checkpoint=checkpoint_file.name)
encoder.input_sequence.register_input(self.dataset)
# NOTE: This assert needs to be here otherwise the model has
# no parameters since the sentence encoder is initialized lazily
self.assertIsInstance(encoder.temporal_states, tf.Tensor)
encoders_variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="enc")
sess_1 = tf.Session()
sess_1.run(tf.global_variables_initializer())
encoder.save(sess_1)
sess_2 = tf.Session()
sess_2.run(tf.global_variables_initializer())
encoder.load(sess_2)
values_in_sess_1 = sess_1.run(encoders_variables)
values_in_sess_2 = sess_2.run(encoders_variables)
self.assertTrue(
all(np.all(v1 == v2) for v1, v2 in
zip(values_in_sess_1, values_in_sess_2)))
os.remove(checkpoint_file.name)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 3,915,043,333,148,252,700 | 29.330275 | 72 | 0.607381 | false |
oaubert/advene | lib/advene/gui/plugins/packageimporter.py | 1 | 8731 | #
# Advene: Annotate Digital Videos, Exchange on the NEt
# Copyright (C) 2008-2017 Olivier Aubert <[email protected]>
#
# Advene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Advene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Advene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""GUI to import packages.
It focuses on importing whole sets of elements: either views,
resources, or whole annotations (with their types) without
overwriting/merging with existing elements.
A common usage scenario is to be able to compare annotations for the
same document but edited by 2 persons using the same schema, by
importing annotations from User2, suffixing his annotation types with
his name.
"""
import logging
logger = logging.getLogger(__name__)
from gettext import gettext as _
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import Pango
from advene.gui.util import dialog
from advene.gui.views import AdhocView
from advene.util.merger import Differ
name="Package importer view plugin"
def register(controller):
controller.register_viewclass(PackageImporter)
class TreeViewImporter:
COLUMN_ELEMENT=0
COLUMN_APPLY=1
COLUMN_ELEMENT_NAME=2
def __init__(self, controller=None, sourcepackage=None, destpackage=None):
self.controller = controller
self.package = sourcepackage
self.destpackage = destpackage
self.store = self.build_liststore()
self.widget = self.build_widget()
def build_liststore(self):
# Store reference to the element, string representation (title and id)
# and boolean indicating wether it is imported or not
store = Gtk.ListStore(
GObject.TYPE_PYOBJECT,
GObject.TYPE_BOOLEAN,
GObject.TYPE_STRING,
)
for at in self.package.annotationTypes:
store.append(row=[ at,
True,
"%s (%d)" % (self.controller.get_title(at),
len(at.annotations)) ])
return store
def toggle_selection(self):
"""Toggle all elements from the current selection.
"""
def toggle_row(model, path, it, data=None):
model.set_value(it, self.COLUMN_APPLY, not model.get_value(it, self.COLUMN_APPLY))
self.widget.get_selection().selected_foreach(toggle_row)
return True
def build_widget(self):
treeview = Gtk.TreeView(model=self.store)
treeview.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
treeview.set_headers_clickable(True)
treeview.set_enable_search(False)
renderer = Gtk.CellRendererToggle()
renderer.set_property('activatable', True)
column = Gtk.TreeViewColumn(_('Import?'), renderer,
active=self.COLUMN_APPLY)
column.set_sort_column_id(self.COLUMN_APPLY)
def toggled_cb(renderer, path, model, column):
model[path][column] = not model[path][column]
return True
renderer.connect('toggled', toggled_cb, self.store, self.COLUMN_APPLY)
treeview.append_column(column)
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(_('Element'), renderer,
text=self.COLUMN_ELEMENT_NAME)
column.set_resizable(True)
column.set_max_width(300)
column.set_sort_column_id(self.COLUMN_ELEMENT_NAME)
treeview.append_column(column)
return treeview
class PackageImporter(AdhocView):
view_name = _("Package importer view")
view_id = 'packageimporter'
tooltip=_("Display package import interface")
def __init__(self, controller=None, parameters=None, sourcepackage=None, destpackage=None):
super().__init__(controller=controller)
self.close_on_package_load = True
self.contextual_actions = ()
self.controller=controller
opt, arg = self.load_parameters(parameters)
self.sourcepackage=sourcepackage
self.destpackage=destpackage
self.widget=self.build_widget()
def build_widget(self):
self.mergerview = TreeViewImporter(controller=self.controller, sourcepackage=self.sourcepackage, destpackage=self.destpackage)
vbox=Gtk.VBox()
label = Gtk.Label(_("Import annotations from %(source)s into %(dest)s") % {'source': self.sourcepackage.uri,
'dest': self.destpackage.uri})
label.set_ellipsize(Pango.EllipsizeMode.MIDDLE)
vbox.pack_start(label, False, False, 0)
hbox = Gtk.HBox()
self.suffix_entry = Gtk.Entry()
self.suffix_entry.set_text("IMPORTED")
hbox.pack_start(Gtk.Label(_("Suffix to append to created types")), False, False, 0)
hbox.pack_start(self.suffix_entry, True, True, 0)
vbox.pack_start(hbox, False, False, 0)
scroll_win = Gtk.ScrolledWindow ()
scroll_win.set_policy (Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
vbox.add(scroll_win)
scroll_win.add(self.mergerview.widget)
self.buttonbox = Gtk.HButtonBox()
def validate(b):
m = self.mergerview.store
suffix = self.suffix_entry.get_text().strip()
if not suffix:
dialog.message_dialog(_("The suffix cannot be empty."), icon=Gtk.MessageType.ERROR)
return True
annotation_count = 0
type_count = 0
# Let's use differ methods to copy elements
differ = Differ(source=self.sourcepackage, destination=self.destpackage, controller=self.controller)
batch_id=object()
for l in m:
if l[self.mergerview.COLUMN_APPLY]:
source_at = l[self.mergerview.COLUMN_ELEMENT]
logger.debug("Copying %s (%d annotations)", source_at.title, len(source_at.annotations))
type_count += 1
dest_at = differ.copy_annotation_type(source_at, generate_id=True)
dest_at.title = "%s %s" % (dest_at.title, suffix)
self.controller.notify('AnnotationTypeCreate', annotationtype=dest_at, immediate=True, batch=batch_id)
for a in source_at.annotations:
annotation_count += 1
# Since we copied the annotation type before, copy_annotation should use the translated name
new_a = differ.copy_annotation(a, generate_id=True)
self.controller.notify('AnnotationCreate', annotation=new_a, immediate=True, batch=batch_id)
logger.info(_("Copied %(count)d annotations from %(tcount)d types") % { "count": annotation_count, "tcount": type_count })
self.close()
return True
def select_all(b):
model=self.mergerview.store
for l in model:
l[self.mergerview.COLUMN_APPLY] = True
return True
def unselect_all(b):
model=self.mergerview.store
for l in model:
l[self.mergerview.COLUMN_APPLY] = False
return True
def toggle_selection(b):
self.mergerview.toggle_selection()
return True
b = Gtk.Button(_("All"))
b.set_tooltip_text(_("Check all items"))
b.connect('clicked', select_all)
self.buttonbox.add (b)
b = Gtk.Button(_('None'))
b.set_tooltip_text(_("Uncheck all items"))
b.connect('clicked', unselect_all)
self.buttonbox.add (b)
b = Gtk.Button(_('Selection'))
b.set_tooltip_text(_("Toggle checked state on selected lines"))
b.connect('clicked', toggle_selection)
self.buttonbox.add (b)
b = Gtk.Button(stock=Gtk.STOCK_OK)
b.connect('clicked', validate)
self.buttonbox.add (b)
b = Gtk.Button(stock=Gtk.STOCK_CANCEL)
b.connect('clicked', lambda b: self.close())
self.buttonbox.add (b)
vbox.pack_start(self.buttonbox, False, True, 0)
return vbox
| gpl-2.0 | -7,850,985,333,841,483,000 | 37.632743 | 134 | 0.624327 | false |
pika/pika | pika/frame.py | 1 | 7744 | """Frame objects that do the frame demarshaling and marshaling."""
import logging
import struct
from pika import amqp_object
from pika import exceptions
from pika import spec
from pika.compat import byte
LOGGER = logging.getLogger(__name__)
class Frame(amqp_object.AMQPObject):
"""Base Frame object mapping. Defines a behavior for all child classes for
    assignment of core attributes and implementation of a core _marshal
method which child classes use to create the binary AMQP frame.
"""
NAME = 'Frame'
def __init__(self, frame_type, channel_number):
"""Create a new instance of a frame
:param int frame_type: The frame type
:param int channel_number: The channel number for the frame
"""
self.frame_type = frame_type
self.channel_number = channel_number
def _marshal(self, pieces):
"""Create the full AMQP wire protocol frame data representation
:rtype: bytes
"""
payload = b''.join(pieces)
return struct.pack('>BHI', self.frame_type, self.channel_number,
len(payload)) + payload + byte(spec.FRAME_END)
def marshal(self):
"""To be ended by child classes
:raises NotImplementedError
"""
raise NotImplementedError
class Method(Frame):
"""Base Method frame object mapping. AMQP method frames are mapped on top
of this class for creating or accessing their data and attributes.
"""
NAME = 'METHOD'
def __init__(self, channel_number, method):
"""Create a new instance of a frame
:param int channel_number: The frame type
:param pika.Spec.Class.Method method: The AMQP Class.Method
"""
Frame.__init__(self, spec.FRAME_METHOD, channel_number)
self.method = method
def marshal(self):
"""Return the AMQP binary encoded value of the frame
        :rtype: bytes
"""
pieces = self.method.encode()
pieces.insert(0, struct.pack('>I', self.method.INDEX))
return self._marshal(pieces)
class Header(Frame):
"""Header frame object mapping. AMQP content header frames are mapped
on top of this class for creating or accessing their data and attributes.
"""
NAME = 'Header'
def __init__(self, channel_number, body_size, props):
"""Create a new instance of a AMQP ContentHeader object
:param int channel_number: The channel number for the frame
:param int body_size: The number of bytes for the body
:param pika.spec.BasicProperties props: Basic.Properties object
"""
Frame.__init__(self, spec.FRAME_HEADER, channel_number)
self.body_size = body_size
self.properties = props
def marshal(self):
"""Return the AMQP binary encoded value of the frame
        :rtype: bytes
"""
pieces = self.properties.encode()
pieces.insert(
0, struct.pack('>HxxQ', self.properties.INDEX, self.body_size))
return self._marshal(pieces)
class Body(Frame):
"""Body frame object mapping class. AMQP content body frames are mapped on
to this base class for getting/setting of attributes/data.
"""
NAME = 'Body'
def __init__(self, channel_number, fragment):
"""
Parameters:
- channel_number: int
- fragment: unicode or str
"""
Frame.__init__(self, spec.FRAME_BODY, channel_number)
self.fragment = fragment
def marshal(self):
"""Return the AMQP binary encoded value of the frame
        :rtype: bytes
"""
return self._marshal([self.fragment])
class Heartbeat(Frame):
"""Heartbeat frame object mapping class. AMQP Heartbeat frames are mapped
on to this class for a common access structure to the attributes/data
values.
"""
NAME = 'Heartbeat'
def __init__(self):
"""Create a new instance of the Heartbeat frame"""
Frame.__init__(self, spec.FRAME_HEARTBEAT, 0)
def marshal(self):
"""Return the AMQP binary encoded value of the frame
        :rtype: bytes
"""
return self._marshal(list())
class ProtocolHeader(amqp_object.AMQPObject):
"""AMQP Protocol header frame class which provides a pythonic interface
for creating AMQP Protocol headers
"""
NAME = 'ProtocolHeader'
def __init__(self, major=None, minor=None, revision=None):
"""Construct a Protocol Header frame object for the specified AMQP
version
:param int major: Major version number
:param int minor: Minor version number
:param int revision: Revision
"""
self.frame_type = -1
self.major = major or spec.PROTOCOL_VERSION[0]
self.minor = minor or spec.PROTOCOL_VERSION[1]
self.revision = revision or spec.PROTOCOL_VERSION[2]
def marshal(self):
"""Return the full AMQP wire protocol frame data representation of the
ProtocolHeader frame
        :rtype: bytes
"""
return b'AMQP' + struct.pack('BBBB', 0, self.major, self.minor,
self.revision)
def decode_frame(data_in): # pylint: disable=R0911,R0914
"""Receives raw socket data and attempts to turn it into a frame.
Returns bytes used to make the frame and the frame
:param str data_in: The raw data stream
:rtype: tuple(bytes consumed, frame)
:raises: pika.exceptions.InvalidFrameError
"""
# Look to see if it's a protocol header frame
try:
if data_in[0:4] == b'AMQP':
major, minor, revision = struct.unpack_from('BBB', data_in, 5)
return 8, ProtocolHeader(major, minor, revision)
except (IndexError, struct.error):
return 0, None
# Get the Frame Type, Channel Number and Frame Size
try:
(frame_type, channel_number, frame_size) = struct.unpack(
'>BHL', data_in[0:7])
except struct.error:
return 0, None
# Get the frame data
frame_end = spec.FRAME_HEADER_SIZE + frame_size + spec.FRAME_END_SIZE
# We don't have all of the frame yet
if frame_end > len(data_in):
return 0, None
# The Frame termination chr is wrong
if data_in[frame_end - 1:frame_end] != byte(spec.FRAME_END):
raise exceptions.InvalidFrameError("Invalid FRAME_END marker")
# Get the raw frame data
frame_data = data_in[spec.FRAME_HEADER_SIZE:frame_end - 1]
if frame_type == spec.FRAME_METHOD:
# Get the Method ID from the frame data
method_id = struct.unpack_from('>I', frame_data)[0]
# Get a Method object for this method_id
method = spec.methods[method_id]()
# Decode the content
method.decode(frame_data, 4)
# Return the amount of data consumed and the Method object
return frame_end, Method(channel_number, method)
elif frame_type == spec.FRAME_HEADER:
# Return the header class and body size
class_id, weight, body_size = struct.unpack_from('>HHQ', frame_data)
# Get the Properties type
properties = spec.props[class_id]()
# Decode the properties
out = properties.decode(frame_data[12:])
# Return a Header frame
return frame_end, Header(channel_number, body_size, properties)
elif frame_type == spec.FRAME_BODY:
# Return the amount of data consumed and the Body frame w/ data
return frame_end, Body(channel_number, frame_data)
elif frame_type == spec.FRAME_HEARTBEAT:
# Return the amount of data and a Heartbeat frame
return frame_end, Heartbeat()
raise exceptions.InvalidFrameError("Unknown frame type: %i" % frame_type)
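# ---------------------------------------------------------------------------
# Illustrative round-trip (not part of the original pika module): marshal a
# heartbeat and a protocol header, then feed the bytes back to decode_frame.
# The helper is never called automatically; it only documents expected usage.
def _frame_roundtrip_demo():
    raw = Heartbeat().marshal()
    consumed, frame = decode_frame(raw)
    assert consumed == len(raw) and isinstance(frame, Heartbeat)

    raw = ProtocolHeader().marshal()
    consumed, frame = decode_frame(raw)
    assert consumed == 8 and isinstance(frame, ProtocolHeader)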
| bsd-3-clause | -8,649,238,038,116,597,000 | 28.333333 | 78 | 0.631586 | false |
simonprickett/wmataapiexperiments | train_predictions_script/pollStationData.py | 1 | 1887 | #####
# Script to poll API data for DC Metro station
#
# Author: Simon Prickett
#####
import json
import os
import requests
import sys
import time
#####
# Query the WMATA API for data for the station represented
# by stationCode e.g. N06 = Reston Wiehle East
#####
def getStationData(stationCode, apiKey):
url = 'https://wmataapibeta.azure-api.net/StationPrediction.svc/json/GetPrediction/' + stationCode + '?api_key=' + apiKey
res = requests.get(url)
return res.json()
#####
# Display error telling user to set up their WMATA_API_KEY
# environment variable
#####
def needToSetApiKey():
print 'You need to set an environment variable:'
print 'WMATA_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
print 'Before you can run this script.'
exit(1)
#####
# Display usage error message
#####
def usage():
	print 'This script requires 3 parameters: a station code, a number of times to'
	print 'query for data, and a filename to store the results in.'
	print 'Example: ' + sys.argv[0] + ' N06 2000 results.json'
exit(1)
#####
# Entry point
#####
apiKey = os.environ.get('WMATA_API_KEY', '')
if (len(apiKey) == 0):
needToSetApiKey()
if (len(sys.argv) == 4):
# Got the right number of arguments, is the second one an integer
numSamples = 0
try:
numSamples = int(sys.argv[2])
currentSample = 1
print 'Will take ' + sys.argv[2] + ' samples for ' + sys.argv[1] + ' and store in ' + sys.argv[3]
f = open(sys.argv[3], 'w')
f.write('[\n')
while (currentSample <= numSamples):
print sys.argv[1] + ' ' + str(currentSample) + ' of ' + str(numSamples)
json.dump(getStationData(sys.argv[1], apiKey), f)
currentSample += 1
# Do we need a comma or is this the last iteration?
if (currentSample <= numSamples):
f.write(',')
f.write('\n')
f.flush()
time.sleep(60)
f.write(']')
f.close()
except ValueError:
usage()
else:
# Incorrect invocation
usage()
| mit | 3,686,401,257,969,455,600 | 23.506494 | 122 | 0.666137 | false |
iFedix/FirewallController | modules/live.py | 1 | 19897 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib.packet import udp
# CLASS START
class Live(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(Live, self).__init__(*args, **kwargs)
self.mac_to_port = {}
        #create the MAC address table (see below for details). It is a dictionary that will later become a dictionary of dictionaries!
        #For example, the final MAC table will be: mac_to_port = {1: {"00:00:00:02": 2, "00:00:00:01": 1}, 2: {"00:00:00:02": 1, "00:00:00:01":2}}
self.messages = []
        self.communications = "" #all the communications recorded by the controller
        self.currentroutes = [] #GLOBAL list (never cleared) of information about the host-to-host communications that have to be set up:
                                #e.g. [00:00:00:00:00:01 00:00:00:00:00:02 ICMP, 00:00:00:00:00:05 00:00:00:00:00:07 HTTP]
                                #NB: dual pairs (e.g. 00:00:00:00:00:02 00:00:00:00:00:01 ICMP) are not inserted in this list, because the communication must be two-way
                                #see check() for this behavior
        self.story = [] #List of information about the host-to-host communications that have to be set up
                        #e.g. [00:00:00:00:00:01 00:00:00:00:00:02 ICMP, 00:00:00:00:00:05 00:00:00:00:00:07 HTTP]
        #Difference between currentroutes and story: story is a list used to keep track of the communications that must be set up. When a new packet-in arrives, a new entry is added to story and is later removed when the packet is accepted or denied. currentroutes is a similar list, but its values are never deleted and it holds a single entry for mirrored packets (i.e. if 00:00:00:00:00:02 00:00:00:00:00:01 ICMP arrives and then 00:00:00:00:00:01 00:00:00:00:00:02 ICMP, only one entry is added). It keeps track of the communications that have already been accepted: if the first packet has been accepted, currentroutes makes the intermediate paths towards the destination be accepted automatically (without asking the user). It works like an intent, through a global table.
    # ---------------------HELPER METHODS-----------------------------
def getProtocol(self, pkt):
pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
tp = pkt.get_protocol(tcp.tcp)
port = 0
if tp:
port = tp.dst_port
ud = pkt.get_protocol(udp.udp)
if ud:
port = ud.dst_port
#print "PORTA: %s" % port
if pkt_ipv4:
protocol = pkt_ipv4.proto
if protocol==1:
return "ICMP"
if protocol==6:
if port==80:
return "HTTP"
if port==443:
return "HTTPS"
return "TCP"
if protocol==17:
if port==53:
return "DNS"
if port==67:
return "DHCP"
return "UDP"
return "Unknown. If you confirm, you will add a general traffic rule (= every type of traffic) between src and dst"
def getMatch(self, pkt, parser, in_port, dst):
pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
tp = pkt.get_protocol(tcp.tcp)
port = 0
if tp:
port = tp.dst_port
ud = pkt.get_protocol(udp.udp)
if ud:
port = ud.dst_port
#print "PORTA: %s" % port
if pkt_ipv4:
protocol = pkt_ipv4.proto
if protocol==1:
return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=1)
if protocol==6:
if port==80:
return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=6, tcp_dst=80)
if port==443:
return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=6, tcp_dst=443)
return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=6)
if protocol==17:
if port==53:
parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=17, udp_dst=53)
if port==67:
parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=17, udp_dst=67)
return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=17)
return parser.OFPMatch(in_port=in_port, eth_dst=dst)
    #method used to filter incoming MAC addresses (= they pass through the controller without user confirmation)
def filtered_ip(self, dst, eth):
        #the following MAC addresses are excluded from filtering (they are forwarded normally):
        #ARP requests, Link Layer Discovery Protocol, multicast (IPv6 and IPv4), broadcast address
return eth.ethertype != 0x0806 and self.lldp_filter(dst) and self.ipv4_multicast_filter(dst) and self.ipv6_multicast_filter(dst) and dst != "ff:ff:ff:ff:ff:ff"
def lldp_filter(self, addr):
return addr != "01:80:c2:00:00:0e" and addr != "01:80:c2:00:00:03" and addr != "01:80:c2:00:00:00"
def ipv6_multicast_filter(self, addr):
        #exclude MACs from 33-33-00-00-00-00 to 33-33-FF-FF-FF-FF (see http://www.iana.org/assignments/ethernet-numbers/ethernet-numbers.xhtml)
return addr[:5]!="33:33"
def ipv4_multicast_filter(self, addr):
        #exclude MACs from 01-00-5E-00-00-00 to 01-00-5E-7F-FF-FF (see https://technet.microsoft.com/en-us/library/cc957928.aspx)
        #print "evaluating %s" % addr
if addr[:8]!="01:00:5e":
#print "TRUE"
return True
else:
val = addr[9]=='8' or addr[9]=='9' or addr[9]=='a' or addr[9]=='b' or addr[9]=='c' or addr[9]=='d' or addr[9]=='e' or addr[9]=='f'
#print "Sono nel secondo ramo: %s" % val
return val
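    # Worked examples (added for illustration): '01:00:5e:0b:aa:bb' falls inside
    # the IPv4 multicast MAC range (fourth octet 0x0b <= 0x7f), so the method
    # returns False and the frame is excluded from the user-confirmation workflow;
    # '01:00:5e:8b:aa:bb' is outside the range (0x8b > 0x7f), so the method
    # returns True and the frame goes through the normal confirmation path.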
    #method that simply treats 00:00:00:00:00:02 00:00:00:00:00:01 ICMP as equal to 00:00:00:00:00:01 00:00:00:00:00:02 ICMP,
    #because it is simply the return traffic of the same communication
    def check(self, to_find): #e.g. to_find: 00:00:00:00:00:02 00:00:00:00:00:01 ICMP
        add = to_find.split( ) #add is a list with three elements (the two MAC addresses and the protocol)
        case1 = "%s %s %s" % (add[0], add[1], add[2])
        #these two operations build both orderings: 00:00:00:00:00:02 00:00:00:00:00:01 ICMP and 00:00:00:00:00:01 00:00:00:00:00:02 ICMP
        case2 = "%s %s %s" % (add[1], add[0], add[2])
        return (case1 in self.currentroutes or case2 in self.currentroutes) #if an occurrence already exists return True (it would be an already-authorized route!)
    #--------------------------------MAIN FUNCTIONS--------------------------------------
    def list_communications(self):
        #first REST API executed: it notifies the user of a new connection (new packet-in from one host to another host)
        actual = self.communications
        self.communications = self.communications[self.communications.find('\n') + 1:] #remove the value actual from communications by keeping everything after the first \n (= empty communications)
        #print "queued: %s" % actual
        # The following algorithm checks whether a given src/dst pair has shown up for the first time.
        # E.g.: if h1 pings h2 for the first time, the user is notified that a connection from h1 to h2 is in progress.
        # In a topology with two switches and two hosts, though (and the same holds for more generic topologies), 4 rules (4 pieces of the path) must be added:
        # draw the topology for more clarity!
        # 1) on eth1 of s2, for packets coming from h2 and directed to h1 (via eth2)
        # 2) on eth2 of s1, for packets coming from h2 and directed to h1 (via eth1)
        # 3) on eth1 of s1, for packets coming from h1 and directed to h2 (via eth2)
        # 4) on eth2 of s2, for packets coming from h1 and directed to h2 (via eth1)
        # With this algorithm, at the first request (e.g. h1 ping h2) the pair h1-h2 (+ its traffic type) is stored.
        # The other pieces of the route (i.e. the other rules) are then inserted automatically, since they all refer to the pair h1-h2 (+ type).
        if(actual!=''):
            if self.check(actual[:actual.find('\n')]) == True: #used to cut the trailing \n: i.e. take the substring from 0 up to the position of the \n (excluded)
                #print "ENTRY ALREADY SEEN %s" % actual[:actual.find('\n')]
                self.accept() #accept right away! it refers to a pair already accepted by the user!
                return "done" #notify the JS script that it must not ask the user anything else because, this being an intermediate packet
                            #of a connection between src and dst that was already authorized, the flow is added to the switch automatically
            else:
                #print "ENTRY NEVER SEEN %s" % actual[:actual.find('\n')]
                self.currentroutes.append(actual[:actual.find('\n')]) #if it is a new pair, ask the user what to do; if they accept, at the next step the
                                                                    #other intermediate rules are added automatically
                return actual
def accept(self):
datapath = self.messages[0].datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = self.messages[0].match['in_port']
dpid = datapath.id
pkt = packet.Packet(self.messages[0].data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
src = eth.src
dst = eth.dst
protocol = self.getProtocol(pkt)
key = "%s %s %s" % (src, dst, protocol)
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
        #depending on the incoming packet and on its traffic type (ICMP, DNS, etc.) install an appropriate flow
        match = self.getMatch(pkt, parser, in_port, dst)
#print(match)
actions = [parser.OFPActionOutput(out_port)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
        #If a buffer_id exists (i.e. if the packet data is stored on the switch) then the buffer reference (buffer_id) must be passed,
        #otherwise it is not needed
        #mod says to install an OpenFlow mod that uses the instructions described above (apply the behavior immediately),
        #the actions (send out on the output port) and the match (installation of the appropriate rule depending on the traffic type)
if self.messages[0].buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=self.messages[0].buffer_id,
priority=1, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=1,
match=match, instructions=inst)
datapath.send_msg(mod)
if key in self.story:
self.story.remove(key)
#print "%s eliminata (sono in accept)!" % key
self.messages.pop(0) #rimuove dalla lista l'elemento 0
def deny(self):
datapath = self.messages[0].datapath
parser = datapath.ofproto_parser
in_port = self.messages[0].match['in_port']
pkt = packet.Packet(self.messages[0].data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
src = eth.src
dst = eth.dst
protocol = self.getProtocol(pkt)
key = "%s %s %s" % (src, dst, protocol)
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
        #If a buffer_id exists (i.e. if the packet data is stored on the switch) then the buffer reference (buffer_id) must be passed,
        #otherwise it is not needed
        #mod says to install an OpenFlow mod that drops the packet: in fact, if the instructions=inst field is not given in the arguments (as it is in accept),
        #this method creates an OpenFlow mod that drops the packets that match (i.e. those entering from a certain port and destined to a certain MAC address).
        #Subsequent identical requests will be blocked by this rule! The only way to remove the rule
        #is to do it manually, overwriting it through manual insertion with the tap.py module
if self.messages[0].buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=self.messages[0].buffer_id,
priority=1, match=match)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=1,
match=match)
datapath.send_msg(mod)
if key in self.story:
self.story.remove(key)
#print "%s eliminata (sono in deny)!" % key
self.messages.pop(0)
    #----------------------------SWITCH MANAGEMENT-------------------------------------------
    #what follows is a decorator that tells how to handle the OpenFlow phase of the switch features request.
    #Specifically, after receiving the reply from the switch, a table-miss flow is added, i.e. the default behavior
    #for packets that arrive at the switch and have no matching flow (they do not know where to be redirected by the switch).
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# Delete all existing rules on the switch
mod = parser.OFPFlowMod(datapath=datapath, command=ofproto.OFPFC_DELETE, out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY)
datapath.send_msg(mod)
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
        #by default packets are sent to the controller with OFPCML_NO_BUFFER.
        #The OFPActionOutput method tells the switch to send the packet out with the rules OFPP_CONTROLLER (towards the controller)
        #and OFPCML_NO_BUFFER (which translates into sending the whole packet without buffering anything)
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
        # OFPIT_APPLY_ACTIONS means: apply the actions in 'actions' immediately.
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
        # With priority 0 every packet matches! Everything is sent to the controller.
mod = parser.OFPFlowMod(datapath=datapath, priority=0, match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
        # This method collects the incoming packet-ins! The user will then accept them or not. They are stored in self.messages.
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
        # msg syntax (example follows):
#OFPPacketIn(buffer_id=256,cookie=0,data='\x01\x80\xc2\x00\x00\x0e\x8e\xf5\xa4\xcd\xa4j\x88\xcc\x02\x16\x07
#dpid:0000000000000001\x04\x05\x02\x00\x00\x00\x02\x06\x02\x00x\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
#match=OFPMatch(oxm_fields={'in_port': 2}),reason=0,table_id=0,total_len=60))
        in_port = msg.match['in_port']  # which switch port did the packet arrive on?
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
        dpid = datapath.id  # which switch? returns its id (e.g. 1, 2, ...)
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
        src = eth.src  # ethernet source address (= MAC address)
        dst = eth.dst  # ethernet destination address (= MAC address)
        # Below we add the information for switch dpid.
        # It maps each MAC address to the corresponding switch port.
        # If the switch dpid does not yet exist in the MAC address table, add it with its MAC/port mapping set to {} (empty).
        # If the switch was already there, the method does nothing!
self.mac_to_port.setdefault(dpid, {})
# learn a mac address to avoid FLOOD next time.
        # In short, associate the source MAC address with the ingress port.
        # That is, associate the physical device (MAC address) that sent the packet with the switch port it is attached to!
        # It is like recording who made the request: the MAC address is tied to the port on which this device listens.
        # So, for example, a return packet will not need to flood, because we now know which port the destination device (MAC address) is attached to!
        # The table will look like this (as noted above):
        # mac_to_port = {1: {"00:00:00:02": 2, "00:00:00:01": 1}, 2: {"00:00:00:02": 1, "00:00:00:01": 2}}
self.mac_to_port[dpid][src] = in_port
        # Now look up the destination MAC address in the MAC address table:
        # if an entry for the destination exists under switch dpid, extract the output port from the dst MAC address.
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
        # Otherwise the output port has to be a flood: the packet is sent out of every port.
        # That way we hope to reach the destination MAC address.
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
        # PROTOCOL DETECTION
protocol = self.getProtocol(pkt)
#print "protocol: %s" % protocol
#print "STORIA: %s" % story
#print "DEBUG: Packet in src %s dst %s con protocollo %s" % (src, dst, protocol)
key = "%s %s %s" % (src, dst, protocol)
if key not in self.story and self.filtered_ip(dst, eth):
            # append the message that just arrived to the list of pending messages
self.messages.append(ev.msg)
            # write the source and the destination to the output, separated by a space
self.communications += str(src)
self.communications += ' '
self.communications += str(dst)
self.communications += ' '
self.communications += str(protocol)
self.communications += '\n'
self.story.append(key)
#print "Aggiunto %s alla storia!" % key
if self.filtered_ip(dst, eth) == False:
            data = None  # the data to be sent to the switch is set to None.
            # Why? Because the data may be buffered inside the switch (and identified by a buffer_id).
            if msg.buffer_id == ofproto.OFP_NO_BUFFER:  # if there is no buffer_id, the data is taken from the incoming packet_in
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
            # The packet-out message behaves in two ways depending on whether the data is buffered inside the switch:
            # if it is, the switch fetches the data through the buffer_id; if it is not, the data field is filled by the if just above
            # and the controller sends the switch a packet-out that also carries the full packet data.
datapath.send_msg(out)
| gpl-3.0 | -781,128,038,751,831,400 | 52.058667 | 833 | 0.675881 | false |
JanFan/py-aho-corasick | cmp.py | 1 | 2262 | # -*- coding: utf-8 -*-
'''
Performance Testing
Requirements:
pip install pyahocorasick
'''
import random
import string
import time
from py_aho_corasick import py_aho_corasick
import ahocorasick
rand_str = lambda n: ''.join([random.choice(string.ascii_lowercase) for i in range(n)])
if __name__ == '__main__':
N = 1000000
text = rand_str(N)
keywords = list()
NW = 50000
for i in range(NW):
nw = random.randint(5,10)
kw = rand_str(nw)
keywords.append(kw)
# pyahocorasick
start_t = time.time()
A = ahocorasick.Automaton()
for idx, key in enumerate(keywords):
A.add_word(key, (idx, key))
A.make_automaton()
delta_build1 = time.time() - start_t
start_t = time.time()
cnt1 = 0
for end_index, (insert_order, original_value) in A.iter(text):
start_index = end_index - len(original_value) + 1
assert text[start_index:start_index + len(original_value)] == original_value
cnt1 += 1
delta_search1 = time.time() - start_t
# py_aho_corasick
start_t = time.time()
A = py_aho_corasick.Automaton(keywords)
delta_build2 = time.time() - start_t
start_t = time.time()
kv = A.get_keywords_found(text)
cnt2 = 0
for idx,k,v in kv:
assert text[idx:idx+len(k)] == k
cnt2 += 1
delta_search2 = time.time() - start_t
# brute force
start_t = time.time()
cnt3 = 0
for kw in keywords:
beg = 0
while beg < len(text):
idx = text.find(kw, beg)
if idx == -1:
break
else:
assert text[idx:idx+len(kw)] == kw
beg = idx + 1
cnt3 += 1
delta_search3 = time.time() - start_t
print(cnt1)
assert cnt1 == cnt2
assert cnt1 == cnt3
# output
print('pyahocorasick: text of {0} length, {1} keywords, building time {2} and searching time cost {3}'.format(N,NW,delta_build1,delta_search1))
print('py_aho_corasick: text of {0} length, {1} keywords, building time {2} and searching time cost {3}'.format(N,NW,delta_build2,delta_search2))
print('brute force: text of {0} length, {1} keywords, building time {2} and searching time cost {3}'.format(N,NW,0,delta_search3))
| mit | -589,962,487,331,969,400 | 26.925926 | 149 | 0.589302 | false |
irwinsnet/DesPy | despy/dp.py | 1 | 3255 | # Despy: A discrete event simulation framework for Python
# Version 0.1
# Released under the MIT License (MIT)
# Copyright (c) 2015, Stacy Irwin
"""
********
despy.dp
********
.. autosummary::
"""
EARLY = -1
STANDARD = 0
LATE = 1
class AbstractPackage():
def __init__(self):
import despy.abstract.model
self.AbstractModel = despy.abstract.model.AbstractModel
abstract = AbstractPackage()
del AbstractPackage
class StatsPackage():
def __init__(self):
from despy.stats.random import get_empirical_pmf, get_poisson_pmf
self.get_empirical_pmf = get_empirical_pmf
self.get_poisson_pmf = get_poisson_pmf
stats = StatsPackage()
del StatsPackage
from despy.session import Session, Config # @UnusedImport
class OutputPackage():
def __init__(self):
import despy.output.report
self.HtmlReport = despy.output.report.HtmlReport
self.Datatype = despy.output.report.Datatype
import despy.output.results
#IMPORTS despy.output.trace
self.results = despy.output.results.Results
import despy.output.statistic
self.AbstractStatistic = despy.output.statistic.AbstractStatistic
self.DiscreteStatistic = despy.output.statistic.DiscreteStatistic
self.TimeWeightedStatistic = (
despy.output.statistic.TimeWeightedStatistic)
import despy.output.trace
self.Trace = despy.output.trace.Trace
self.TraceRecord = despy.output.trace.TraceRecord
import despy.output.plot
self.plot = despy.output.plot
import despy.output.console
self.console = despy.output.console
import despy.output.counter
self.Counter = despy.output.counter.Counter
output = OutputPackage()
del OutputPackage
class ModelPackage():
def __init__(self):
import despy.model.trigger
self.AbstractTrigger = despy.model.trigger.AbstractTrigger
self.TimeTrigger = despy.model.trigger.TimeTrigger
import despy.model.component
self.Component = despy.model.component.Component
import despy.model.process
#IMPORTS despy.fel.event
self.Process = despy.model.process.Process
self.ProcessTimeOutEvent = despy.model.process.ProcessTimeOutEvent
import despy.model.queue
self.Queue = despy.model.queue.Queue
import despy.model.entity
self.Entity = despy.model.entity.Entity
import despy.model.resource
self.Resource = despy.model.resource.Resource
self.ResourceQueue = despy.model.resource.ResourceQueue
self.ResourceFinishEvent = despy.model.resource.ResourceFinishServiceEvent
import despy.model.timer
self.RandomTimer = despy.model.timer.RandomTimer
self.TimerEvent = despy.model.timer.TimerEvent
model = ModelPackage()
del ModelPackage
class FelPackage():
def __init__(self):
import despy.fel.event
self.Event = despy.fel.event.Event
fel = FelPackage()
del FelPackage
from despy.simulation import Simulation # @UnusedImport
| mit | -9,093,828,271,728,340,000 | 28.590909 | 82 | 0.654378 | false |
ExCiteS/geokey-dataimports | geokey_dataimports/tests/test_urls.py | 1 | 6547 | """All tests for URLs."""
from django.test import TestCase
from django.core.urlresolvers import reverse, resolve
from ..views import (
IndexPage,
AllDataImportsPage,
AddDataImportPage,
SingleDataImportPage,
DataImportCreateCategoryPage,
DataImportAssignFieldsPage,
DataImportAllDataFeaturesPage,
RemoveDataImportPage
)
class UrlsTest(TestCase):
"""Test all URLs."""
# ###########################
# TEST ADMIN PAGES
# ###########################
def test_index_page_reverse(self):
"""Test reverser for index page."""
reversed_url = reverse('geokey_dataimports:index')
self.assertEqual(reversed_url, '/admin/dataimports/')
def test_index_page_resolve(self):
"""Test resolver for index page."""
resolved_url = resolve('/admin/dataimports/')
self.assertEqual(resolved_url.func.__name__, IndexPage.__name__)
def test_all_data_imports_page_reverse(self):
"""Test reverser for all data imports page."""
reversed_url = reverse(
'geokey_dataimports:all_dataimports',
kwargs={'project_id': 1}
)
self.assertEqual(reversed_url, '/admin/projects/1/dataimports/')
def test_all_data_imports_page_resolve(self):
"""Test resolver for all data imports page."""
resolved_url = resolve('/admin/projects/1/dataimports/')
self.assertEqual(
resolved_url.func.__name__,
AllDataImportsPage.__name__
)
self.assertEqual(int(resolved_url.kwargs['project_id']), 1)
def test_add_data_import_page_reverse(self):
"""Test reverser for adding data import page."""
reversed_url = reverse(
'geokey_dataimports:dataimport_add',
kwargs={'project_id': 1}
)
self.assertEqual(reversed_url, '/admin/projects/1/dataimports/add/')
def test_add_data_import_page_resolve(self):
"""Test resolver for adding data import page."""
resolved_url = resolve('/admin/projects/1/dataimports/add/')
self.assertEqual(
resolved_url.func.__name__,
AddDataImportPage.__name__
)
self.assertEqual(int(resolved_url.kwargs['project_id']), 1)
def test_single_data_import_page_reverse(self):
"""Test reverser for single data import page."""
reversed_url = reverse(
'geokey_dataimports:single_dataimport',
kwargs={'project_id': 1, 'dataimport_id': 5}
)
self.assertEqual(reversed_url, '/admin/projects/1/dataimports/5/')
def test_single_data_import_page_resolve(self):
"""Test resolver for single data import page."""
resolved_url = resolve('/admin/projects/1/dataimports/5/')
self.assertEqual(
resolved_url.func.__name__,
SingleDataImportPage.__name__
)
self.assertEqual(int(resolved_url.kwargs['project_id']), 1)
self.assertEqual(int(resolved_url.kwargs['dataimport_id']), 5)
def test_data_import_create_category_page_reverse(self):
"""Test reverser for data import creating category page."""
reversed_url = reverse(
'geokey_dataimports:dataimport_create_category',
kwargs={'project_id': 1, 'dataimport_id': 5}
)
self.assertEqual(
reversed_url,
'/admin/projects/1/dataimports/5/create-category/'
)
def test_data_import_create_category_page_resolve(self):
"""Test resolver for data import creating category page."""
resolved_url = resolve(
'/admin/projects/1/dataimports/5/create-category/'
)
self.assertEqual(
resolved_url.func.__name__,
DataImportCreateCategoryPage.__name__
)
self.assertEqual(int(resolved_url.kwargs['project_id']), 1)
self.assertEqual(int(resolved_url.kwargs['dataimport_id']), 5)
def test_data_import_assign_fields_page_reverse(self):
"""Test reverser for data import assigning fields page."""
reversed_url = reverse(
'geokey_dataimports:dataimport_assign_fields',
kwargs={'project_id': 1, 'dataimport_id': 5}
)
self.assertEqual(
reversed_url,
'/admin/projects/1/dataimports/5/assign-fields/'
)
def test_data_import_assign_fields_page_resolve(self):
"""Test resolver for data import assigning fields page."""
resolved_url = resolve(
'/admin/projects/1/dataimports/5/assign-fields/'
)
self.assertEqual(
resolved_url.func.__name__,
DataImportAssignFieldsPage.__name__
)
self.assertEqual(int(resolved_url.kwargs['project_id']), 1)
self.assertEqual(int(resolved_url.kwargs['dataimport_id']), 5)
    def test_data_import_all_datafeatures_page_reverse(self):
"""Test reverser for data import all data features page."""
reversed_url = reverse(
'geokey_dataimports:dataimport_all_datafeatures',
kwargs={'project_id': 1, 'dataimport_id': 5}
)
self.assertEqual(
reversed_url,
'/admin/projects/1/dataimports/5/datafeatures/'
)
def test_data_import_all_datafeatures_page_resolve(self):
"""Test resolver for data import all data features page."""
resolved_url = resolve('/admin/projects/1/dataimports/5/datafeatures/')
self.assertEqual(
resolved_url.func.__name__,
DataImportAllDataFeaturesPage.__name__
)
self.assertEqual(int(resolved_url.kwargs['project_id']), 1)
self.assertEqual(int(resolved_url.kwargs['dataimport_id']), 5)
def test_remove_data_import_page_reverse(self):
"""Test reverser for removing data import page."""
reversed_url = reverse(
'geokey_dataimports:dataimport_remove',
kwargs={'project_id': 1, 'dataimport_id': 5}
)
self.assertEqual(
reversed_url,
'/admin/projects/1/dataimports/5/remove/'
)
def test_remove_data_import_page_resolve(self):
"""Test resolver for removing data import page."""
resolved_url = resolve('/admin/projects/1/dataimports/5/remove/')
self.assertEqual(
resolved_url.func.__name__,
RemoveDataImportPage.__name__
)
self.assertEqual(int(resolved_url.kwargs['project_id']), 1)
self.assertEqual(int(resolved_url.kwargs['dataimport_id']), 5)
| mit | 2,995,298,854,237,916,000 | 36.843931 | 79 | 0.609287 | false |
bitcraft/pyglet | examples/programming_guide/window_subclass.py | 1 | 2116 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Demonstrates a useful pattern for pyglet applications: subclassing Window.
"""
import pyglet
class HelloWorldWindow(pyglet.window.Window):
def __init__(self):
super().__init__()
self.label = pyglet.text.Label('Hello, world!')
def on_draw(self):
self.clear()
self.label.draw()
window = HelloWorldWindow()
pyglet.app.run()
| bsd-3-clause | 2,403,309,868,462,289,400 | 38.185185 | 78 | 0.689509 | false |
ma-ver-ick/pyaphrodite | prepare_images.py | 1 | 1536 | from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import cv2
import os
import dto
import pickle
ROOT_DIR = "/home/pi/jme3/assets/Textures/"
RESIZE_WIDTH = 1920 # Raspberry pi texture size
def list_files(directory, extension):
ret = []
for file in os.listdir(directory):
if not file.lower().endswith(extension):
continue
ret.append(directory + os.sep + file)
return ret
all_files = list()
all_files.extend(list_files(ROOT_DIR, ".jpg"))
database = list()
for file in all_files:
print("Processing file: %s" % (file))
img = cv2.imread(file)
height, width, depth = img.shape
aspect_ratio = float(width) / float(height)
new_height = RESIZE_WIDTH / aspect_ratio
temp_debug_msg = "\tResizing from (%4.0f, %4.0f) to (%4.0f, %4.0f)"
temp_debug_tuple = (width, height, RESIZE_WIDTH, new_height)
print(temp_debug_msg % temp_debug_tuple)
dim = (int(RESIZE_WIDTH), int(new_height))
resized = cv2.resize(img, dim, interpolation = cv2.INTER_LANCZOS4)
orig_path, orig_filename = os.path.split(file)
orig_filename, orig_file_ext = os.path.splitext(orig_filename)
optimized_filename = orig_path + os.sep + orig_filename
optimized_filename += ".optimized.png"
cv2.imwrite(optimized_filename, resized)
p = dto.PictureDTO(file, width, height, depth, optimized_filename, dim[0], dim[1])
database.append(p)
database_path = ROOT_DIR + os.sep + "database.pickle"
print("Saving database to " + database_path)
pickle.dump(database, open(database_path, "wp"))
| mit | 4,709,988,111,238,682,000 | 26.927273 | 83 | 0.708333 | false |
dongqunxi/GrangerCausality | Preprocessing/CTPS_identifation_BrainComponents.py | 1 | 4218 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 21 10:42:55 2014
@author: imenb101
"""
import numpy as np
import matplotlib.pylab as pl
import mne, sys, os
from mne.viz import tight_layout
from mne.fiff import Raw
from mne.preprocessing import ICA
from ctps import compute_ctps
from ctps import plot_ctps_panel
try:
subject = sys.argv[1]
    trigger = sys.argv[2]  # Get the trigger type: 'stim' or 'resp'
except:
print "Please run with input file provided. Exiting"
sys.exit()
res_ch_name = 'STI 013'
sti_ch_name = 'STI 014'
n_components=0.99
n_pca_components=None
max_pca_components=None
subjects_dir = '/home/qdong/data/'
subject_path = subjects_dir + subject#Set the data path of the subject
#raw_fname = subject_path + '/MEG/ssp_cleaned_%s_audi_cued-raw_cle.fif' %subject
raw_fname = subject_path + '/MEG/%s_audi_cued-raw_cle.fif' %subject
raw_basename = os.path.splitext(os.path.basename(raw_fname))[0]
raw = Raw(raw_fname, preload=True)
picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
ica = ICA(n_components=n_components, n_pca_components=n_pca_components, max_pca_components=max_pca_components, random_state=0)
ica.decompose_raw(raw, picks=picks, decim=3)
if trigger == 'resp':#'1' represents the response channel
add_from_raw = mne.fiff.pick_types(raw.info, meg=False, resp=True, exclude='bads')
sources_add = ica.sources_as_raw(raw, picks=add_from_raw)
events = mne.find_events(sources_add, stim_channel=res_ch_name)
raw_basename += '_resp'
elif trigger == 'stim':#'0' represents the stimuli channel
add_from_raw = mne.fiff.pick_types(raw.info, meg=False, stim=True, exclude='bads')
sources_add = ica.sources_as_raw(raw, picks=add_from_raw)
events = mne.find_events(sources_add, stim_channel=sti_ch_name)
raw_basename += '_stim'
else:
print "Please select the triger channel '1' for response channel or '0' for stimilus channel."
sys.exit()
# drop non-data channels (ICA sources are type misc)
#ica.n_pca_components=None
picks = mne.fiff.pick_types(sources_add.info, meg=False, misc=True, exclude='bads')
# Compare different bandwidths of ICA components: 2-4, 4-8, 8-12 and 12-16 Hz
l_f = 2
Brain_idx1=[]#The index of ICA related with trigger channels
axes_band = [221, 222, 223, 224]
ax_index = 0
for i in [4, 8, 12, 16]:
h_f = i
get_ylim = True
if l_f != 2:
get_ylim = False
sources_add = ica.sources_as_raw(raw, picks=add_from_raw)
#sources_add.filter(l_freq=l_f, h_freq=h_f, method='iir', n_jobs=4)
sources_add.filter(l_freq=l_f, h_freq=h_f, n_jobs=4, method='iir')
this_band = '%i-%iHz' % (l_f, h_f)
temp = l_f
l_f = h_f
# Epochs at R peak onset, from stim_eve.
ica_epochs_events = mne.Epochs(sources_add, events, event_id=1, tmin=-0.3, tmax=0.3,
picks=picks, preload=True, proj=False)
x_length = len(ica_epochs_events.ch_names)
# Compute phase values and statistics (significance values pK)
#phase_trial_ecg, pk_dyn_ecg, _ = compute_ctps(ica_epochs_ecg.get_data())
_ , pk_dyn_stim, phase_trial = compute_ctps(ica_epochs_events.get_data())
# Get kuiper maxima
pk_max = pk_dyn_stim.max(axis=1)
    Brain_sources = pk_max > 0.1 # bool array, get the prominent components related to the trigger
Brain_ind = np.where(Brain_sources)[0].tolist() # indices
#skip the null idx related with response
Brain_idx1 += (Brain_ind)#Get the obvious sources related
#Plot the bar
#ax = pl.subplot(axes_band[ax_index])
#pk_max.plot(axes=ax_index, ylim=ylim_ecg, xlim=xlim1)
pl.subplot(axes_band[ax_index])
x_bar = np.arange(x_length)
pl.bar(x_bar, pk_max)
for x in Brain_ind:
pl.bar(x, pk_max[x], facecolor='r')
    pl.axhline(0.1, color='k', label='threshold')
pl.xlabel('%s' %this_band)
pl.ylim(0, 0.5)
ax_index += 1
pl.tight_layout()
pl.show()
#pl.savefig(subject_path+'/MEG/ctps_distribution_%s_%s_withoutSSP.png'%(subject, trigger))
pl.savefig(subject_path+'/MEG/ctps_distribution_%s_%s.png'%(subject, trigger))
Brain_idx = list(set(Brain_idx1))
print '%s has been identified as trigger components' %(Brain_idx)
| bsd-3-clause | -859,910,584,621,222,400 | 38.055556 | 127 | 0.672357 | false |
drhagen/parsita | src/parsita/metaclasses.py | 1 | 4934 | import inspect
import builtins
import re
from . import options
from .parsers import Parser, RegexParser
class ParsersDict(dict):
def __init__(self, old_options: dict):
super().__init__()
self.old_options = old_options # Holds state of options at start of definition
self.forward_declarations = dict() # Stores forward declarations as they are discovered
def __missing__(self, key):
class_body_globals = inspect.currentframe().f_back.f_globals
if key in class_body_globals:
return class_body_globals[key]
elif key in dir(builtins):
return getattr(builtins, key)
elif key in self.forward_declarations:
return self.forward_declarations[key]
else:
new_forward_declaration = ForwardDeclaration()
self.forward_declarations[key] = new_forward_declaration
return new_forward_declaration
def __setitem__(self, key, value):
if isinstance(value, Parser):
value.protected = True # Protects against accidental concatenation of sequential parsers
value.name = key # Used for better error messages
super().__setitem__(key, value)
class ForwardDeclaration(Parser):
def __init__(self):
self._definition = None
def __getattribute__(self, member):
if member != '_definition' and self._definition is not None:
return getattr(self._definition, member)
else:
return object.__getattribute__(self, member)
def define(self, parser: Parser) -> None:
self._definition = parser
def fwd() -> ForwardDeclaration:
"""Manually create a forward declaration.
Normally, forward declarations are created automatically by the contexts.
But they can be created manually if not in a context or if the user wants
to avoid confusing the IDE.
"""
return ForwardDeclaration()
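# A hedged usage sketch of a manual forward declaration (the parser names below
# are illustrative, not part of this module): declare first, reference it while
# building other parsers, and resolve it later with define().
#
#   expr = fwd()
#   # ... build parsers that reference `expr` here ...
#   expr.define(the_fully_built_expr_parser)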
class GeneralParsersMeta(type):
@classmethod
def __prepare__(mcs, name, bases, **_): # noqa: N804
old_options = {
'handle_literal': options.handle_literal,
'parse_method': options.parse_method,
}
options.handle_literal = options.wrap_literal
options.parse_method = options.basic_parse
return ParsersDict(old_options)
def __init__(cls, name, bases, dct, **_): # noqa: N805
old_options = dct.old_options
super().__init__(name, bases, dct)
# Resolve forward declarations, will raise if name not found
for name, forward_declaration in dct.forward_declarations.items():
obj = dct[name]
if not isinstance(obj, Parser):
obj = options.handle_literal(obj)
forward_declaration._definition = obj
# Reset global variables
for key, value in old_options.items():
setattr(options, key, value)
def __call__(cls, *args, **kwargs):
raise TypeError('Parsers cannot be instantiated. They use class bodies purely as contexts for managing '
'defaults and allowing forward declarations. Access the individual parsers as static '
'attributes.')
class GeneralParsers(metaclass=GeneralParsersMeta):
"""Context for parsing general sequences.
This is not a real class. Don't instantiate it. This is used by inheriting
from it and defining parsers as class attributes in the body of the child
class.
"""
pass
class TextParsersMeta(GeneralParsersMeta):
@classmethod
def __prepare__(mcs, name, bases, whitespace: str = options.default_whitespace): # noqa: N804
old_options = {
'whitespace': options.whitespace,
'handle_literal': options.handle_literal,
'parse_method': options.parse_method,
}
# Store whitespace in global location so regex parsers can see it
if isinstance(whitespace, str):
whitespace = re.compile(whitespace)
if whitespace is None:
options.whitespace = None
else:
options.whitespace = RegexParser(whitespace)
options.handle_literal = options.default_handle_literal
options.parse_method = options.default_parse_method
return ParsersDict(old_options)
def __new__(mcs, name, bases, dct, **_): # noqa: N804
return super().__new__(mcs, name, bases, dct)
class TextParsers(metaclass=TextParsersMeta):
r"""Context for parsing text.
This is not a real class. Don't instantiate it. This is used by inheriting
from it and defining parsers as class attributes in the body of the child
class.
There is a keyword argument for the metaclass ``whitespace``. This is a
regular expression defining the whitespace to be ignored. The default is
r"\s*".
"""
pass
__all__ = ['ForwardDeclaration', 'fwd', 'GeneralParsers', 'TextParsers']
| mit | -8,236,695,756,848,111,000 | 32.794521 | 112 | 0.64167 | false |
annahs/atmos_research | AL_incand_calib_SP217.py | 1 | 1802 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
from datetime import timedelta
import math
import numpy.polynomial.polynomial as poly
#mass fg, pk_ht, UNCORR
AL_HG_incand_calib = [
[0.23173,25.17577 ],
[0.41398,48.99595 ],
[1.26106,186.48122 ],
[2.88282,489.41296 ],
[5.43241,880.95554 ],
[8.94784,1347.39537],
]
HG_pkht = np.array([row[1] for row in AL_HG_incand_calib])
HG_mass = np.array([row[0] for row in AL_HG_incand_calib])
HG_mass_corr = np.array([row[0]/0.7 for row in AL_HG_incand_calib])
HG_fit = poly.polyfit(HG_pkht, HG_mass_corr, 1)
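# Note: numpy's poly.polyfit returns coefficients in increasing order,
# so HG_fit[0] is the intercept and HG_fit[1] is the slope (as used below).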
print 'HG fit', HG_fit
for line in AL_HG_incand_calib:
incand_pk_ht = line[1]
uncorr_mass_fg = line[0]
AD_corr_fit = HG_fit[0] + HG_fit[1]*incand_pk_ht
line.append(AD_corr_fit)
HG_pk_ht = [row[1] for row in AL_HG_incand_calib]
HG_uncorr_mass = [row[0] for row in AL_HG_incand_calib]
HG_uncorr_fit = [row[2]*0.7 for row in AL_HG_incand_calib]
HG_ADcorr_fit = [row[2] for row in AL_HG_incand_calib]
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(111)
ax.scatter(HG_pk_ht,HG_uncorr_mass,color='r', label = 'Uncorrected calibration')
ax.plot(HG_pk_ht,HG_ADcorr_fit, '--r', label = 'Aquadag correction applied')
ax.plot(HG_pk_ht,HG_uncorr_fit, '-r')
plt.xlabel('Incandescent pk height (a.u.)')
plt.ylabel('rBC mass (fg)')
plt.text(250,10, 'Aquadag corrected fit:\nrBC mass = -0.017584 + 9.2453E-3*pkht')
ax.set_ylim(0,14)
ax.set_xlim(0,2000)
plt.legend()
os.chdir('C:/Users/Sarah Hanna/Documents/Data/Alert Data/SP2 Calibrations/')
plt.savefig('Alert SP2#17 Aquadag calibration curves.png', bbox_inches='tight')
plt.show() | mit | 2,754,781,142,312,153,000 | 25.514706 | 81 | 0.709212 | false |
mcalmer/spacewalk | spacecmd/src/lib/group.py | 1 | 12970 | #
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2013 Aron Parsons <[email protected]>
#
# NOTE: the 'self' variable is an instance of SpacewalkShell
# wildcard import
# pylint: disable=W0401,W0614
# unused argument
# pylint: disable=W0613
# invalid function name
# pylint: disable=C0103
import os
import re
import shlex
try:
from xmlrpc import client as xmlrpclib
except ImportError:
import xmlrpclib
from spacecmd.utils import *
def help_group_addsystems(self):
print('group_addsystems: Add systems to a group')
print('usage: group_addsystems GROUP <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_group_addsystems(self, text, line, beg, end):
parts = shlex.split(line)
if line[-1] == ' ':
parts.append('')
if len(parts) == 2:
return tab_completer(self.do_group_list('', True), text)
elif len(parts) > 2:
return self.tab_complete_systems(parts[-1])
return None
def do_group_addsystems(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_group_addsystems()
return
group_name = args.pop(0)
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
system_ids = []
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
system_ids.append(system_id)
self.client.systemgroup.addOrRemoveSystems(self.session,
group_name,
system_ids,
True)
####################
def help_group_removesystems(self):
print('group_removesystems: Remove systems from a group')
print('usage: group_removesystems GROUP <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_group_removesystems(self, text, line, beg, end):
parts = shlex.split(line)
if line[-1] == ' ':
parts.append('')
if len(parts) == 2:
return tab_completer(self.do_group_list('', True), text)
elif len(parts) > 2:
return self.tab_complete_systems(parts[-1])
return None
def do_group_removesystems(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_group_removesystems()
return
group_name = args.pop(0)
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
system_ids = []
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
system_ids.append(system_id)
print('Systems')
print('-------')
print('\n'.join(sorted(systems)))
if not self.user_confirm('Remove these systems [y/N]:'):
return
self.client.systemgroup.addOrRemoveSystems(self.session,
group_name,
system_ids,
False)
####################
def help_group_create(self):
print('group_create: Create a system group')
print('usage: group_create [NAME] [DESCRIPTION]')
def do_group_create(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if args:
name = args[0]
else:
name = prompt_user('Name:')
if len(args) > 1:
description = ' '.join(args[1:])
else:
description = prompt_user('Description:')
self.client.systemgroup.create(self.session, name, description)
####################
def help_group_delete(self):
print('group_delete: Delete a system group')
print('usage: group_delete NAME ...')
def complete_group_delete(self, text, line, beg, end):
return tab_completer(self.do_group_list('', True), text)
def do_group_delete(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_group_delete()
return
groups = args
self.do_group_details('', True)
if not self.user_confirm('Delete these groups [y/N]:'):
return
for group in groups:
self.client.systemgroup.delete(self.session, group)
####################
def help_group_backup(self):
print('group_backup: backup a system group')
    print('''usage: group_backup NAME [OUTDIR]
OUTDIR defaults to $HOME/spacecmd-backup/group/YYYY-MM-DD/NAME
''')
def complete_group_backup(self, text, line, beg, end):
List = self.do_group_list('', True)
List.append('ALL')
return tab_completer(List, text)
def do_group_backup(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_group_backup()
return
groups = args
if len(args) == 1 and args[0] == 'ALL':
groups = self.do_group_list('', True)
outputpath_base = None
# use an output base from the user if it was passed
if len(args) == 2:
outputpath_base = datetime.now().strftime(os.path.expanduser(args[1]))
else:
outputpath_base = os.path.expanduser('~/spacecmd-backup/group')
# make the final output path be <base>/date
outputpath_base = os.path.join(outputpath_base,
datetime.now().strftime("%Y-%m-%d"))
try:
if not os.path.isdir(outputpath_base):
os.makedirs(outputpath_base)
except OSError:
logging.error('Could not create output directory')
return
for group in groups:
print("Backup Group: %s" % group)
details = self.client.systemgroup.getDetails(self.session, group)
outputpath = outputpath_base + "/" + group
print("Output File: %s" % outputpath)
fh = open(outputpath, 'w')
fh.write(details['description'])
fh.close()
####################
def help_group_restore(self):
print('group_restore: restore a system group')
print('usage: group_restore INPUTDIR [NAME] ...')
def complete_group_restore(self, text, line, beg, end):
parts = shlex.split(line)
if len(parts) > 1:
groups = self.do_group_list('', True)
groups.append('ALL')
return tab_completer(groups, text)
return None
def do_group_restore(self, args):
arg_parser = get_argument_parser()
(args, options) = parse_command_arguments(args, arg_parser)
inputdir = os.getcwd()
groups = []
files = {}
current = {}
if args:
inputdir = args[0]
groups = args[1:]
else:
self.help_group_restore()
return
inputdir = os.path.abspath(inputdir)
logging.debug("Input Directory: %s" % (inputdir))
# make a list of file items in the input dir
if os.path.isdir(inputdir):
d_content = os.listdir(inputdir)
for d_item in d_content:
if os.path.isfile(inputdir + "/" + d_item):
logging.debug("Found file %s" % inputdir + "/" + d_item)
files[d_item] = inputdir + "/" + d_item
else:
logging.error("Restore dir %s does not exits or is not a directory" % inputdir)
return
if not files:
logging.error("Restore dir %s has no restore items" % inputdir)
return
if (len(groups) == 1 and groups[0] == 'ALL') or not groups:
groups = files.keys()
    elif groups:
        for group in groups:
            if group not in files:
                logging.error("Group %s was not found in backup" % (group))
for groupname in self.do_group_list('', True):
details = self.client.systemgroup.getDetails(self.session, groupname)
current[groupname] = details['description']
current[groupname] = current[groupname].rstrip('\n')
for groupname in files:
fh = open(files[groupname], 'r')
details = fh.read()
fh.close()
details = details.rstrip('\n')
if groupname in current and current[groupname] == details:
logging.debug("Already have %s" % groupname)
continue
elif groupname in current:
logging.debug("Already have %s but the description has changed" % groupname)
if is_interactive(options):
print("Changing description from:")
print("\n\"%s\"\nto\n\"%s\"\n" % (current[groupname], details))
userinput = prompt_user('Continue [y/N]:')
if re.match('y', userinput, re.I):
logging.info("Updating description for group: %s" % groupname)
self.client.systemgroup.update(self.session, groupname, details)
else:
logging.info("Updating description for group: %s" % groupname)
self.client.systemgroup.update(self.session, groupname, details)
else:
logging.info("Creating new group %s" % groupname)
group = self.client.systemgroup.create(self.session, groupname, details)
####################
def help_group_list(self):
print('group_list: List available system groups')
print('usage: group_list')
def do_group_list(self, args, doreturn=False):
groups = self.client.systemgroup.listAllGroups(self.session)
groups = [g.get('name') for g in groups]
if doreturn:
return groups
else:
if groups:
print('\n'.join(sorted(groups)))
return None
####################
def help_group_listsystems(self):
print('group_listsystems: List the members of a group')
print('usage: group_listsystems GROUP')
def complete_group_listsystems(self, text, line, beg, end):
return tab_completer(self.do_group_list('', True), text)
def do_group_listsystems(self, args, doreturn=False):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_group_listsystems()
return None
group = args[0]
try:
systems = self.client.systemgroup.listSystems(self.session, group)
systems = [s.get('profile_name') for s in systems]
except xmlrpclib.Fault:
logging.warning('%s is not a valid group' % group)
return []
if doreturn:
return systems
else:
if systems:
print('\n'.join(sorted(systems)))
return None
####################
def help_group_details(self):
print('group_details: Show the details of a system group')
print('usage: group_details GROUP ...')
def complete_group_details(self, text, line, beg, end):
return tab_completer(self.do_group_list('', True), text)
def do_group_details(self, args, short=False):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_group_details()
return
add_separator = False
for group in args:
try:
details = self.client.systemgroup.getDetails(self.session,
group)
systems = self.client.systemgroup.listSystems(self.session,
group)
systems = [s.get('profile_name') for s in systems]
except xmlrpclib.Fault:
logging.warning('%s is not a valid group' % group)
return
if add_separator:
print(self.SEPARATOR)
add_separator = True
        print('Name: %s' % details.get('name'))
print('Description: %s' % details.get('description'))
print('Number of Systems: %i' % details.get('system_count'))
if not short:
print('')
print('Members')
print('-------')
print('\n'.join(sorted(systems)))
| gpl-2.0 | -2,932,662,179,444,418,000 | 27.195652 | 88 | 0.594217 | false |
vbmacher/emuStudio | add_server.py | 1 | 4717 | #!/usr/bin/env python
import sys
import os
import os.path
import shutil
import xml.dom.minidom
from xml.dom.minidom import getDOMImplementation
from xml.dom.minidom import parseString
from subprocess import call
def get_vars():
errorMsg = ""
travisSecurityVars = os.environ["TRAVIS_SECURE_ENV_VARS"]
if travisSecurityVars == "false":
errorMsg = "\nNo secure env vars available; "
masterPassword = os.getenv("MASTER_PASSWORD", "false")
if masterPassword == "false":
errorMsg += "\nMaster security password is not set; "
userName = os.getenv("EMUSTUDIO_USERNAME", "false")
if userName == "false":
errorMsg += "\nServer user name is not set; "
password = os.getenv("EMUSTUDIO_PASSWORD", "false")
if password == "false":
errorMsg += "\nServer password is not set"
if errorMsg != "":
print errorMsg
sys.exit(1)
return (masterPassword, userName, password)
def get_or_create(xmldoc, name, element=None):
if element == None:
element = xmldoc
children = element.getElementsByTagName(name)
if len(children) == 0:
children = [xmldoc.createElement(name)]
element.appendChild(children[0])
return children[0]
def recreate(xmldoc, name, element=None):
if element == None:
element = xmldoc
children = element.getElementsByTagName(name)
if len(children) == 0:
theChild = xmldoc.createElement(name)
element.appendChild(theChild)
else:
theChild = children[0]
for child in theChild.childNodes:
theChild.removeChild(child)
return theChild
def prettify(node):
return '\n'.join([line for line in node.toprettyxml(indent=' ').split('\n') if line.strip()])
def create_settings_security(path, masterPassword):
try:
xmldoc = xml.dom.minidom.parse(path)
except:
xmldoc = getDOMImplementation().createDocument(None, "settingsSecurity", None)
securityElement = get_or_create(xmldoc, "settingsSecurity")
masterElement = recreate(xmldoc, "master", securityElement)
securityNode = xmldoc.createTextNode(masterPassword)
masterElement.appendChild(securityNode)
return prettify(xmldoc)
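# A sketch of the file this function is expected to produce (illustrative
# layout; the exact whitespace comes from prettify()):
#
#   <settingsSecurity>
#     <master>...master password...</master>
#   </settingsSecurity>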
def create_settings(path, userName, password):
try:
xmldoc = xml.dom.minidom.parse(path)
except:
xmldoc = getDOMImplementation().createDocument(None, "settings", None)
settingsElement = get_or_create(xmldoc, "settings")
serversElement = get_or_create(xmldoc, "servers", settingsElement)
for child in serversElement.getElementsByTagName("server"):
serversElement.removeChild(child)
serverElement = recreate(xmldoc, "server", serversElement)
serverIdElement = xmldoc.createElement("id")
serverUserElement = xmldoc.createElement("username")
serverPasswordElement = xmldoc.createElement("password")
serverIdNode = xmldoc.createTextNode("emustudio-repository")
serverUserNode = xmldoc.createTextNode(userName)
serverPasswordNode = xmldoc.createTextNode(password)
serverIdElement.appendChild(serverIdNode)
serverUserElement.appendChild(serverUserNode)
serverPasswordElement.appendChild(serverPasswordNode)
serverElement.appendChild(serverIdElement)
serverElement.appendChild(serverUserElement)
serverElement.appendChild(serverPasswordElement)
# Turn off interactive mode
interactiveNode = recreate(xmldoc, "interactiveMode", settingsElement)
interactiveValue = xmldoc.createTextNode("false")
interactiveNode.appendChild(interactiveValue)
return prettify(xmldoc)
def write_file(path, content, mode='w'):
file = open(path, mode)
file.write(content)
file.close()
def backup_or_create(path):
if os.path.exists(path):
shutil.copy2(path, path + ".bak")
else:
write_file(path, "")
homedir = os.path.expanduser("~")
settingsSecurityPath = homedir + '/.m2/settings-security.xml'
settingsPath = homedir + '/.m2/settings.xml'
knownHostsPath = homedir + "/.ssh/known_hosts"
vars = get_vars()
backup_or_create(settingsSecurityPath)
backup_or_create(settingsPath)
try:
settingsSecurityXml = create_settings_security(settingsSecurityPath, vars[0])
settingsXml = create_settings(settingsPath, vars[1], vars[2])
write_file(settingsSecurityPath, settingsSecurityXml)
write_file(settingsPath, settingsXml)
# append sourceforge.net public ssh key fingerprint (if disabling strict host checking doesn't work)
call(['ssh-keygen', '-R', 'web.sourceforge.net'])
with open(knownHostsPath, "w") as outfile:
call(['ssh-keyscan', '-H', 'web.sourceforge.net'], stdout=outfile)
except:
print "Unexpected error occured"
pass
| gpl-2.0 | -6,555,765,835,985,879,000 | 31.088435 | 104 | 0.705745 | false |
kvaps/vdsm | tests/netlinkTests.py | 1 | 6086 | from collections import deque
import threading
import time
from functional import dummy
from functional.networkTests import IP_ADDRESS, IP_CIDR
from vdsm.netlink import monitor
from vdsm.sysctl import is_disabled_ipv6
from vdsm.utils import monotonic_time
from testValidation import ValidateRunningAsRoot
from testlib import VdsmTestCase as TestCaseBase
class NetlinkEventMonitorTests(TestCaseBase):
TIMEOUT = 1
@ValidateRunningAsRoot
def test_iterate_after_events(self):
with monitor.Monitor(timeout=self.TIMEOUT) as mon:
dummy_name = dummy.create()
dummy.remove(dummy_name)
for event in mon:
if event.get('name') == dummy_name:
break
@ValidateRunningAsRoot
def test_iterate_while_events(self):
"""Tests if monitor is able to catch event while iterating. Before the
iteration we start _set_and_remove_device, which is delayed for .2
seconds. Then iteration starts and wait for new dummy.
"""
dummy_name = dummy.create()
def _set_and_remove_device():
time.sleep(.2)
dummy.setLinkUp(dummy_name)
dummy.remove(dummy_name)
add_device_thread = threading.Thread(target=_set_and_remove_device)
with monitor.Monitor(timeout=self.TIMEOUT) as mon:
add_device_thread.start()
for event in mon:
if event.get('name') == dummy_name:
break
add_device_thread.join()
@ValidateRunningAsRoot
def test_stopped(self):
with monitor.Monitor(timeout=self.TIMEOUT) as mon:
dummy_name = dummy.create()
dummy.remove(dummy_name)
found = any(event.get('name') == dummy_name for event in mon)
self.assertTrue(found, 'Expected event was not caught.')
@ValidateRunningAsRoot
def test_event_groups(self):
with monitor.Monitor(timeout=self.TIMEOUT,
groups=('ipv4-ifaddr',)) as mon_a:
with monitor.Monitor(timeout=self.TIMEOUT,
groups=('link', 'ipv4-route')) as mon_l_r:
dummy_name = dummy.create()
dummy.setIP(dummy_name, IP_ADDRESS, IP_CIDR)
dummy.setLinkUp(dummy_name)
dummy.remove(dummy_name)
for event in mon_a:
self.assertIn('_addr', event['event'], "Caught event '%s' is not "
"related to address." % event['event'])
for event in mon_l_r:
link_or_route = ('_link' in event['event'] or
'_route' in event['event'])
self.assertTrue(link_or_route, "Caught event '%s' is not related "
"to link or route." % event['event'])
@ValidateRunningAsRoot
def test_iteration(self):
with monitor.Monitor(timeout=self.TIMEOUT) as mon:
iterator = iter(mon)
# Generate events to avoid blocking
dummy_name = dummy.create()
iterator.next()
dummy.remove(dummy_name)
iterator.next()
with self.assertRaises(StopIteration):
while True:
iterator.next()
@ValidateRunningAsRoot
def test_events_keys(self):
def _expected_events(nic, address, cidr):
events_add = [
{'event': 'new_link', 'name': nic},
{'event': 'new_addr', 'address': address + '/' + cidr},
{'event': 'new_link', 'name': nic}]
events_del = [
{'address': address + '/' + cidr, 'event': 'del_addr'},
{'destination': address, 'event': 'del_route'},
{'event': 'del_link', 'name': nic}]
events_ipv6 = [
{'event': 'new_addr', 'family': 'inet6'},
{'event': 'del_neigh'},
{'event': 'del_addr', 'family': 'inet6'}]
if is_disabled_ipv6():
return deque(events_add + events_del)
else:
return deque(events_add + events_ipv6 + events_del)
with monitor.Monitor(timeout=self.TIMEOUT,
silent_timeout=True) as mon:
dummy_name = dummy.create()
dummy.setIP(dummy_name, IP_ADDRESS, IP_CIDR)
dummy.setLinkUp(dummy_name)
dummy.remove(dummy_name)
expected_events = _expected_events(dummy_name, IP_ADDRESS, IP_CIDR)
expected = expected_events.popleft()
for event in mon:
if _is_subdict(expected, event):
expected = expected_events.popleft()
if len(expected_events) == 0:
break
self.assertEqual(0, len(expected_events), '%d expected events have not'
' been caught (in the right order)'
% (1 + len(expected_events)))
def test_timeout(self):
with self.assertRaises(monitor.MonitorError):
try:
with monitor.Monitor(timeout=.01) as mon:
for event in mon:
pass
except monitor.MonitorError as e:
self.assertEquals(e[0], monitor.E_TIMEOUT)
raise
self.assertTrue(mon.is_stopped())
def test_timeout_silent(self):
with monitor.Monitor(timeout=.01, silent_timeout=True) as mon:
for event in mon:
pass
self.assertTrue(mon.is_stopped())
@ValidateRunningAsRoot
def test_timeout_not_triggered(self):
time_start = monotonic_time()
with monitor.Monitor(timeout=self.TIMEOUT) as mon:
dummy_name = dummy.create()
dummy.remove(dummy_name)
for event in mon:
break
self.assertTrue((monotonic_time() - time_start) <= self.TIMEOUT)
self.assertTrue(mon.is_stopped())
def _is_subdict(subset, superset):
return all(item in superset.items() for item in subset.items())
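# e.g. (sketch) _is_subdict({'event': 'new_link'}, {'event': 'new_link', 'name': 'dummy0'})
# returns True, while a missing or differing key in the superset returns False.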
| gpl-2.0 | -8,495,777,914,356,761,000 | 35.011834 | 79 | 0.555209 | false |
OscarES/serpentinetracker | beamline.py | 1 | 21534 | #!/usr/bin/python
#
# Copyright 2009, Stephen Molloy, Stewart Boogert
#
# This file is part of Serpentine.
#
# Serpentine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Serpentine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Serpentine. If not, see <http://www.gnu.org/licenses/>.
#
"""Define the physics functions and classes
(e.g. tracking, Rmat calcs, etc.)"""
# numpy arrays will be useful here
#from numpy import *
import numpy as np
from matplotlib.pylab import plot, subplot, xlabel, ylabel, legend
#from elements import *
#from scipy import weave
from utilities import RotMats
import beamadjust
import copy
from itertools import repeat
import re
# ===============================================================
# Lists of beamline components are almost, but not quite, the right
# way to define the lattice. Here we define a class to inherit from
# list, but with the multiplication operator redefined to do what
# we want
# The physics tools will be added on as methods of this new class
class Line(list):
"""class Line: A class designed to hold the list of elements that define
the beamline lattice. This class inherits from Python's built-in 'list'
class."""
def __mul__(self, fact):
"""Allows multiplication of a small lattice subset by an integer in
order to easily define a repeated section"""
new_line = Line()
copyfunc = lambda x: new_line.extend(copy.deepcopy(x))
for rep in repeat(copyfunc, fact):
rep(self)
# for rep in range(fact):
# new_line.extend(copy.deepcopy(self))
return new_line
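    # A minimal usage sketch (the element names are hypothetical, assumed to be
    # instances from the elements module):
    #   fodo_cell = Line([quad_f, drift1, quad_d, drift2])
    #   arc = fodo_cell * 10   # ten deep-copied repetitions of the cell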
def __repr__(self):
def namecatch(inst):
try: return str(inst.name)
except AttributeError: return "No name attr"
ret = '\n'.join(namecatch(ele)+" :: "+str(ele.__class__) for ele in self)
return ret
def FindEleByName(self, name):
from serpentine import Serpentine
p = re.compile("^" + name + "$")
indlist = list()
for i in xrange(len(self)):
if type(self[i]) == Serpentine:
try:
intern_list = self[i].beamline.FindEleByName(name)
[indlist.append([i, int_i]) for int_i in intern_list]
                except (ValueError, UnboundLocalError): pass
elif p.match(self[i].name):
indlist.append(i)
if indlist: return indlist
else: raise ValueError(name + ": Not found.")
def FindEleByType(self, classname):
from serpentine import Serpentine
p = re.compile("^" + classname + "$")
indlist = list()
for i in xrange(len(self)):
if type(self[i]) == Serpentine:
try:
intern_list = self[i].beamline.FindEleByType(classname)
[indlist.append([i, int_i]) for int_i in intern_list]
                except (ValueError, UnboundLocalError): pass
elif p.match(self[i].__class__.__name__):
indlist.append(i)
if indlist: return indlist
else: raise ValueError(classname + ": Not found.")
def GetEleByType(self, classname):
"""Returns a list of elements of class 'classtype' from self. This
returns the elements themselves, not their indices."""
def extractele(beamline, i, elelist):
if type(i)==int:
elelist.append(beamline[i])
elif type(i[1])==int:
elelist.append(beamline[i[0]].beamline[i[1]])
else:
extractele(beamline[i[0]].beamline, i[1], elelist)
elelist = list()
indlist = self.FindEleByType(classname)
for i in indlist:
extractele(self, i, elelist)
return elelist
def FindEleByObj(self, obj):
"""Returns the index at which the object 'obj' can be found in self."""
for i in range(len(self)):
if self[i].__class__.__name__ == 'Serpentine':
intern_list = self[i].beamline.FindEleByObj(obj)
eledict = dict()
eledict[i] = intern_list
return eledict
if obj == self[i] :
return i
return -1
def GetEleByName(self, name):
"""Returns a list of elements named 'name' from self. This returns
the elements themselves, not their indices."""
def extractele(beamline, i, elelist):
if type(i)==int:
elelist.append(beamline[i])
elif type(i[1])==int:
elelist.append(beamline[i[0]].beamline[i[1]])
else:
extractele(beamline[i[0]].beamline, i[1], elelist)
elems = list()
indlist = self.FindEleByName(name)
for i in indlist:
extractele(self, i, elems)
return elems
def RmatAtoB(self, first, last):
"""Returns the 6D R-matrix between the entrance of self[first], and
the exit of self[last]."""
rmat = np.eye(6)
for i in self[first:last+1]:
rmat = np.dot(i.R, rmat)
return rmat
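    # e.g. (sketch) the R-matrix of the full line:
    #   r_total = beamline.RmatAtoB(0, len(beamline) - 1)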
def Track(self, beam, offset=np.array([0, 0, 0, 0, 0, 0])):
"""The primary tracking method for the Line class.
It loops around each element of self, calculates the offsets due to
movers, offsets, etc., recalculates the energy variable of the beam
being tracked to delta_E/E, and then calls the 'TrackThruEle' method
of the element in question.
Once tracking is complete for that element, the offset and the beam's
energy variable are reset to their original values.
The loop then continues on to the next element.
The 'beam' input should be an object of class 'ElectronBeam' (or one
which inherits from that class).
Track returns the beam that results from tracking through the lattice.
"""
prog = ProgressBar(0, len(self), 77)
beam_out = copy.deepcopy(beam)
# Beam momentum is defined as absolute, but R matrices expect delta_P/P
for ele in self:
if ele.__class__.__name__ == 'Serpentine':
ele.beam_in = beam_out
ele.Track()
beam_out = ele.beam_out
continue
if sum(offset**2)>0:
beam_out.x = self._AdjustBeamByLineOffset(ele, beam_out, offset)
try:
beam_out.x = beamadjust.AdjustBeamWithMover(ele, beam_out)
except AttributeError: pass
if sum(ele.offset**2)>0:
beam_out.x = beamadjust.AdjustBeamByOffset(ele, beam_out)
try:
ele.Processor(beam_out)
except AttributeError: pass
beam_out.x[5, :] = (beam_out.x[5, :] - ele.P) / ele.P
beam_out = ele.TrackThruEle(beam_out)
beam_out.x[5, :] = (beam_out.x[5, :] * ele.P) + ele.P
if sum(ele.offset**2):
beam_out.x = beamadjust.ReAdjustBeamByOffset(ele, beam_out)
if hasattr(ele, 'Mover'):
beam_out.x = beamadjust.ReAdjustBeamWithMover(ele, beam_out)
if sum(offset**2)>0:
beam_out.x = self._ReAdjustBeamByLineOffset(
ele, beam_out, offset
)
prog.updateAmount(self.index(ele))
print prog, "\r",
return beam_out
def _AdjustBeamByLineOffset(self, ele, beam_out, offset):
"""Correct the beam position by the offset specified for the entire
beamline before the call to Track()"""
r_in = RotMats(-offset[5])[0]
line_length = self[-1].S - self[0].S
dist_along_line = ele.S - self[0].S
dist_normed = dist_along_line - (line_length/2) # dist from line centre
delta_x = (dist_normed * offset[1]) + offset[0]
delta_y = (dist_normed * offset[3]) + offset[2]
delta_xp = offset[1]
delta_yp = offset[3]
beam_out.x[0, :] -= delta_x
beam_out.x[1, :] -= delta_xp
beam_out.x[2, :] -= delta_y
beam_out.x[3, :] -= delta_yp
beam_out.x = np.dot(r_in, beam_out.x)
return beam_out.x
def _ReAdjustBeamByLineOffset(self, ele, beam_out, offset):
"""Reset the beam position by the offset specified for the entire
beamline after the call to Track()"""
r_out = RotMats(-offset[5])[1]
line_length = self[-1].S - self[0].S
dist_along_line = ele.S - self[0].S
dist_normed = dist_along_line - (line_length/2) # dist from line centre
delta_x = (dist_normed * offset[1]) + offset[0]
delta_y = (dist_normed * offset[3]) + offset[2]
delta_xp = offset[1]
delta_yp = offset[3]
beam_out.x[0, :] += delta_x
beam_out.x[1, :] += delta_xp
beam_out.x[2, :] += delta_y
beam_out.x[3, :] += delta_yp
beam_out.x = np.dot(r_out, beam_out.x)
return beam_out.x
def SetSPos(self, ini_s=0):
"""Sets the longitudinal position of each element based on an initial
value that defines the location of the upstream end of the first
element (ini_s), and the length of each subsequent element."""
cum_s = ini_s
for i in self:
if i.__class__.__name__ == 'Serpentine':
i.beamline.SetSPos(ini_s=cum_s)
if i.beamline[-1].__class__.__name__ != 'Serpentine':
cum_s = i.beamline[-1].S+i.beamline[-1].L
continue
i.S = cum_s
cum_s += i.L
def TwissProp(self, ini_twiss):
"""Propagates an initial twiss object ('ini_twiss') through the
lattice.
        For each element, the Twiss parameters calculated at its downstream
        end are stored as an attribute of that element. The Twiss parameters
        at the end of the lattice are returned from this function."""
sum_phix, sum_phiy = 0, 0
final_twiss = copy.deepcopy(ini_twiss)
finalgammax = (1+ini_twiss.alphax**2) / ini_twiss.betax
finalgammay = (1+ini_twiss.alphay**2) / ini_twiss.betay
for ele in self:
ele.twiss = copy.deepcopy(final_twiss)
if ele.__class__.__name__ == 'Serpentine':
ele.TwissProp()
continue
det_x = np.linalg.det(ele.R[0:2, 0:2])
det_y = np.linalg.det(ele.R[2:4, 2:4])
deltaphix = np.arctan2(ele.R[0, 1] , \
(final_twiss.betax*ele.R[0, 0] -
final_twiss.alphax*ele.R[0, 1]))
deltaphiy = np.arctan2(ele.R[2, 3] , \
(final_twiss.betay*ele.R[2, 2] -
final_twiss.alphay*ele.R[2, 3]))
sum_phix += deltaphix
sum_phiy += deltaphiy
betax = final_twiss.betax
alphax = final_twiss.alphax
gammax = finalgammax
betay = final_twiss.betay
alphay = final_twiss.alphay
gammay = finalgammay
final_twiss.betax = (
(ele.R[0, 0]**2 * betax) +
(-2*ele.R[0, 0]*ele.R[0, 1] * alphax) +
(ele.R[0, 1]**2 * gammax)
) / det_x
final_twiss.alphax = (
(-ele.R[0, 0]*ele.R[1, 0] * betax) +
((ele.R[0, 0]*ele.R[1, 1] + ele.R[0, 1]*ele.R[1, 0]) *
alphax) +
(-ele.R[0, 1]*ele.R[1, 1] * gammax)
) / det_x
finalgammax = (1 + final_twiss.alphax**2) / final_twiss.betax
final_twiss.betay = (
(ele.R[2, 2]**2 * betay) +
(-2*ele.R[2, 2]*ele.R[2, 3] * alphay) +
(ele.R[2, 3]**2 * gammay)
) / det_y
final_twiss.alphay = (
(-ele.R[2, 2]*ele.R[3, 2] * betay) +
((ele.R[2, 2]*ele.R[3, 3] + ele.R[2, 3]*ele.R[3, 2]) *
alphay) +
(-ele.R[2, 3]*ele.R[3, 3] * gammay)
) / det_y
finalgammay = (1 + final_twiss.alphay**2) / final_twiss.betay
etax = final_twiss.etax
etaxp = final_twiss.etaxp
etay = final_twiss.etay
etayp = final_twiss.etayp
final_twiss.etax = ele.R[0,0]*etax+ele.R[0,1]*etaxp+ele.R[0,5]
final_twiss.etaxp = ele.R[1,0]*etax+ele.R[1,1]*etaxp+ele.R[1,5]
final_twiss.etay = ele.R[2,2]*etay+ele.R[2,3]*etayp+ele.R[2,5]
final_twiss.etayp = ele.R[3,2]*etay+ele.R[3,3]*etayp+ele.R[3,5]
final_twiss.phix = sum_phix
final_twiss.phiy = sum_phiy
return final_twiss
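    # Hedged usage sketch: TwissProp expects an object exposing betax, alphax,
    # betay, alphay, etax, etaxp, etay and etayp attributes (a Twiss-like
    # object defined elsewhere in this package; the name below is hypothetical):
    #
    #     final = beamline.TwissProp(ini_twiss)
    #     print final.betax, final.phix
    #
    # Each element also gains an 'ele.twiss' attribute recorded during the
    # loop, which GetTwiss() below collects into a dictionary.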
def ZeroCors(self):
"""Sets the field of all correctors in the lattice to zero.
This is useful for reverting to the default lattice after a
steering operation has been performed."""
import elements
for ele in self:
if (type(ele) == elements.Xcor or
type(ele) == elements.Ycor or
type(ele) == elements.XYcor):
ele.B = 0
def SingleRmat(self, i):
"""Returns the already calculated R-matrix for beamline[i].
i.e. it returns beamline[i].R."""
return self[i].R
def GetMomProfile(self):
"""Returns the momentum profile of the reference particle"""
spos = [ele.S for ele in self]
mom = [ele.P for ele in self]
return (spos, mom)
def GetEkProfile(self, restmass):
"""Returns the kinetic energy profile of the reference particle"""
spos = [ele.S for ele in self]
kenergy = [np.sqrt(ele.P**2+restmass**2)-restmass for ele in self]
return (spos, kenergy)
def GetRFPhases(self):
"""Returns the RF phases of the AccCav objects in beamline."""
acccavs = self.GetEleByType('AccCav')
return [ele.phi for ele in acccavs]
def XRmat(self, ind=0):
"""Print the 2x2 block of the R matrix corresponding to the
horizontal transverse space. 'ind' is the element for which the
value is printed."""
print self[ind].name + " x matrix:"
print self[ind].R[0:2, 0:2]
def YRmat(self, ind=0):
"""Print the 2x2 block of the R matrix corresponding to the
vertical transverse space. 'ind' is the element for which the
value is printed."""
print self[ind].name + " y matrix:"
print self[ind].R[2:4, 2:4]
def LongRmat(self, ind=0):
"""Print the 2x2 block of the R matrix corresponding to the
longitudinal space. 'ind' is the element for which the value is
printed."""
print self[ind].name + " longitudinal matrix:"
print self[ind].R[4:6, 4:6]
def GetTwiss(self):
"""Returns a dictionary object containing the Twiss paramters
calculated for the beamline."""
twiss_dict = {}
twiss_dict['S'] = []
twiss_dict['betax'] = []
twiss_dict['betay'] = []
twiss_dict['alphax'] = []
twiss_dict['alphay'] = []
twiss_dict['phix'] = []
twiss_dict['phiy'] = []
twiss_dict['etax'] = []
twiss_dict['etay'] = []
twiss_dict['etaxp'] = []
twiss_dict['etayp'] = []
for ele in self:
if ele.__class__.__name__ == 'Serpentine':
subtwiss_dict = ele.beamline.GetTwiss()
twiss_dict['S'].extend(subtwiss_dict['S'])
twiss_dict['betax'].extend(subtwiss_dict['betax'])
twiss_dict['betay'].extend(subtwiss_dict['betay'])
twiss_dict['alphax'].extend(subtwiss_dict['alphax'])
twiss_dict['alphay'].extend(subtwiss_dict['alphay'])
twiss_dict['phix'].extend(subtwiss_dict['phix'])
twiss_dict['phiy'].extend(subtwiss_dict['phiy'])
twiss_dict['etax'].extend(subtwiss_dict['etax'])
twiss_dict['etay'].extend(subtwiss_dict['etay'])
twiss_dict['etaxp'].extend(subtwiss_dict['etaxp'])
twiss_dict['etayp'].extend(subtwiss_dict['etayp'])
else:
twiss_dict['S'].append(ele.S)
twiss_dict['betax'].append(ele.twiss.betax)
twiss_dict['betay'].append(ele.twiss.betay)
twiss_dict['alphax'].append(ele.twiss.alphax)
twiss_dict['alphay'].append(ele.twiss.alphay)
twiss_dict['phix'].append(ele.twiss.phix)
twiss_dict['phiy'].append(ele.twiss.phiy)
twiss_dict['etax'].append(ele.twiss.etax)
twiss_dict['etay'].append(ele.twiss.etay)
twiss_dict['etaxp'].append(ele.twiss.etaxp)
twiss_dict['etayp'].append(ele.twiss.etayp)
return twiss_dict
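    # Hedged plotting sketch (matplotlib usage is an assumption, not part of
    # this class):
    #
    #     twiss = beamline.GetTwiss()
    #     plt.plot(twiss['S'], twiss['betax'], label='beta_x')
    #     plt.plot(twiss['S'], twiss['betay'], label='beta_y')
    #
    # Nested Serpentine objects are flattened into the same lists, so 'S' and
    # the optical functions line up element by element.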
class ProgressBar:
"""A class to display a progress bar when tracking through a beamline."""
def __init__(self, minvalue = 0, maxvalue = 10, totalwidth=12):
self.progbar = "[]" # This holds the progress bar string
self.min = minvalue
self.max = maxvalue
self.span = maxvalue - minvalue
self.width = totalwidth
self.amount = 0 # When amount == max, we are 100% done
self.progbar = ""
self.percentdone = 0
self.updateAmount(0) # Build progress bar string
def updateAmount(self, new_amount = 0):
"""Calculate the percentage compled, and update the progbar string."""
if new_amount < self.min:
new_amount = self.min
if new_amount > self.max:
new_amount = self.max
self.amount = new_amount
self.percentDone()
self.makestr()
def percentDone(self):
"""Figure out the new percent done, round to an integer"""
difffrommin = float(self.amount - self.min)
percentdone = (difffrommin / float(self.span)) * 100.0
self.percentdone = int(round(percentdone))
def makestr(self):
"""Figure out how many hash bars the percentage should be"""
allfull = self.width - 2
numhashes = (self.percentdone / 100.0) * allfull
numhashes = int(round(numhashes))
# build a progress bar with hashes and spaces
self.progbar = "[" + '#'*numhashes + ' '*(allfull-numhashes) + "]"
# figure out where to put the percentage, roughly centered
percentplace = (len(self.progbar) / 2) - len(str(self.percentdone))
percentstring = str(self.percentdone) + "%"
# slice the percentage into the bar
self.progbar = self.progbar[0:percentplace] + percentstring + \
self.progbar[percentplace+len(percentstring):]
def __str__(self):
return str(self.progbar)
def fixborkedlist(borkedlist):
"""A method to repair the broken lists returned by the find methods.
This function should not be called by users."""
buildlist = list()
if isinstance(borkedlist, int):
return borkedlist
for i in borkedlist:
if isinstance(i, int):
buildlist.append(i)
else:
newlist = fixborkedlist(i)
for newi in newlist:
buildlist.append(newi)
return buildlist
# A test suite
if __name__ == '__main__':
from elements import Drift, Quad
import beamrep
import matplotlib.pylab as plt
Shortline = Line()
Shortline.append(Drift(name='ele1', L=0.75))
Shortline.append(Quad(name='ele2', L=0.25, B=5))
Shortline.append(Drift(name='ele3', L=1))
Shortline.append(Quad(name='ele4', L=0.25, B=-5))
beamline = Shortline * 5
# print "="*20
# print " SingleRmat"
# print "="*20
# for i in range(0, len(beamline)):
# print "%s: " % i,
# print SingleRmat(beamline, i)
# print
# print "="*20
# print " RmatAtoB"
# print "="*20
# for i in range(0, len(beamline)):
# print "%s: " % i,
# print RmatAtoB(beamline, 0, i)
print
print "="*20
print " TwissProp"
print "="*20
i_twiss = {}
i_twiss['betax'] = 1
i_twiss['alphax'] = 0
i_twiss['betay'] = 2
i_twiss['alphay'] = 0
f_twiss = beamline.TwissProp(i_twiss)
plt.figure(1)
beamline.PlotTwiss(f_twiss, ax=1, ay=1, px=1, py=1)
print "Assigning beam..."
beamin = beamrep.GaussBeam(N=1e4)
print "Starting tracking..."
# profile.run('beamout = elements.Tracking(beamin)')
# profile.run('beamout = beamline.Track(beamin)')
beamout = beamline.Track(beamin)
print "Done. Now printing figures."
plt.figure(2)
plt.subplot(121)
plt.plot(beamin.x[0, :], beamin.x[1, :], 'bx')
plt.subplot(121)
plt.plot(beamout.x[0, :], beamout.x[1, :], 'r.')
plt.subplot(122)
plt.plot(beamin.x[2, :], beamin.x[3, :], 'bx')
plt.subplot(122)
plt.plot(beamout.x[2, :], beamout.x[3, :], 'r.')
plt.show()
| gpl-3.0 | 1,009,788,563,410,026,200 | 38.152727 | 81 | 0.555029 | false |
uclouvain/osis | ddd/logic/effective_class_repartition/use_case/read/has_class_repartition_service.py | 1 | 1817 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from typing import Optional
from ddd.logic.learning_unit.builder.effective_class_identity_builder import EffectiveClassIdentityBuilder
from infrastructure.learning_unit.domain.service.tutor_distributed_to_class import TutorDistributedToClass
def has_class_repartition_service(
cmd: 'HasClassRepartitionCommand'
) -> Optional[str]:
effective_class_identity = EffectiveClassIdentityBuilder.build_from_command(cmd)
return TutorDistributedToClass.get_first_tutor_full_name_if_exists(
effective_class_identity
)
| agpl-3.0 | -5,357,264,174,100,881,000 | 46.789474 | 106 | 0.691079 | false |
xiaoli-chen/Godel | Youcheng/scrape_Tengxun.py | 1 | 1200 | from bs4 import BeautifulSoup
import urllib.request
import urllib.parse
import requests
# urllib.request
import re
import json
# Find the link for a single job
#url = "http://ssl.gongyi.qq.com/m/201799/realtime.html?et=rtfx>=rtfx&ADTAG=rtfx"
#url = "http://ssl.gongyi.qq.com/m/201799/realtime.html?tp=2&o=1"
# Initial page
#page = urllib.request.urlopen(url)
#soup = BeautifulSoup(page, 'lxml')
#all_matches = soup.findAll(attrs={'rel':['nofollow']})
#print(len(all_matches))
#api_url = 'http://ssl.gongyi.qq.com/cgi-bin/1799_gongyi_search_fund.fcgi?limit=100'
#page = requests.get(api_url)
url_page1='http://ssl.gongyi.qq.com/cgi-bin/1799_rank_ngo?type=ngobym&pg=1&md=9&jsoncallback=_martch99_sear_fn_'
url_page2='http://ssl.gongyi.qq.com/cgi-bin/1799_rank_ngo?type=ngobym&pg=2&md=9&jsoncallback=_martch99_sear_fn_'
url_page3='http://ssl.gongyi.qq.com/cgi-bin/1799_rank_ngo?type=ngobym&pg=3&md=9&jsoncallback=_martch99_sear_fn_'
url_page4='http://ssl.gongyi.qq.com/cgi-bin/1799_rank_ngo?type=ngobym&pg=4&md=9&jsoncallback=_martch99_sear_fn_'
page1 = requests.get(url_page1).text
#page1_text = urllib.parse.unquote(page1)
#page1_text
text_20 = re.search(r'\[(.*)\]',page1)
print(text_20.group())
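# Hedged follow-up sketch: the endpoint wraps its JSON in a JSONP callback
# ("_martch99_sear_fn_(...)" per the jsoncallback parameter above), so one way
# to obtain a Python object is to strip the wrapper before calling json.loads.
# The group index below is an assumption based on the URLs above, not verified
# against the live API:
#
#     payload = re.search(r'_martch99_sear_fn_\((.*)\)', page1, re.S)
#     if payload:
#         data = json.loads(payload.group(1))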
| apache-2.0 | 5,858,549,715,973,136,000 | 29.358974 | 112 | 0.728885 | false |
hansomesong/TracesAnalyzer | Plot/Plot_newSize/Plot_variable_VP_scatter_RLOCs.py | 1 | 4458 | __author__ = 'yueli'
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from config.config import *
# Import the targeted raw CSV file
rawCSV_file1 = os.path.join(
CSV_FILE_DESTDIR,
'For_different_5_VP',
'Deleted_database',
'EID-153.16.47.16-MR-198.6.255.37',
"liege-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)
rawCSV_file2 = os.path.join(
CSV_FILE_DESTDIR,
'For_different_5_VP',
'Deleted_database',
'EID-153.16.47.16-MR-198.6.255.37',
"temple-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)
rawCSV_file3 = os.path.join(
CSV_FILE_DESTDIR,
'For_different_5_VP',
'Deleted_database',
'EID-153.16.47.16-MR-198.6.255.37',
"ucl-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)
rawCSV_file4 = os.path.join(
CSV_FILE_DESTDIR,
'For_different_5_VP',
'Deleted_database',
'EID-153.16.47.16-MR-198.6.255.37',
"umass-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)
rawCSV_file5 = os.path.join(
CSV_FILE_DESTDIR,
'For_different_5_VP',
'Deleted_database',
'EID-153.16.47.16-MR-198.6.255.37',
"wiilab-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)
# Define a function to get the experiment number list from the CSV file
def getTime(rawCSV_file):
i = -1
for line in open(rawCSV_file):
i = i + 1
lines = line.split(";")
if lines[0] == "Round Type":
continue
else:
time.append(i)
return time
def getRlocSet(rawCSV_file):
i = -1
responseList = []
for line in open(rawCSV_file):
print line
i = i + 1
lines = line.split(";")
if lines[0] == "Round Type":
print "Round Type"
continue
else:
if lines[0] == "NegativeReply":
print "Done"
responseList.append(-1)
elif lines[0] == "RoundNoReply":
responseList.append(0)
elif lines[0] == "RoundNormal":
if int(lines[9]) == 1:
if lines[14].split(",")[1] == "195.59.156.123":
responseList.append(1)
elif lines[14].split(",")[1] == "195.59.156.124":
responseList.append(2)
else:
responseList.append(3)
else:
print "There are more than 2 RLOCs together"
else:
print "Unknown type exists"
return responseList
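# The integer codes appended above drive the y-axis of the scatter plot below:
# -1 = Negative Map-Reply, 0 = No Map-Reply, 1/2 = one of the two known RLOCs,
# 3 = any other single RLOC (compare the yticks labels further down).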
time = []
time = getTime(rawCSV_file1)
print "time", time
rlocSet1 = getRlocSet(rawCSV_file1)
print "rlocSet1:", len(rlocSet1)
rlocSet2 = getRlocSet(rawCSV_file2)
print "rlocSet2:", len(rlocSet2)
rlocSet3 = getRlocSet(rawCSV_file3)
print "rlocSet3:", len(rlocSet3)
rlocSet4 = getRlocSet(rawCSV_file4)
print "rlocSet4:", len(rlocSet4)
rlocSet5 = getRlocSet(rawCSV_file5)
print "rlocSet5:", len(rlocSet5)
# Modify the size and dpi of picture, default size is (8,6), default dpi is 80
plt.gcf().set_size_inches(32, 17)
# Define font
font_label = {
'fontname' : 'Times New Roman',
'color' : 'black',
'fontsize' : 70
}
plt.scatter(time, rlocSet1, color='purple', marker="o", label="VP1", s=700)
plt.scatter(time, rlocSet2, color='green', marker='>', label="VP2", s=700)
plt.scatter(time, rlocSet3, color='red', marker=(5,0), label = "VP3", s=700)
plt.scatter(time, rlocSet4, color='orange', marker='*', label = "VP4", s=700)
plt.scatter(time, rlocSet5, color='blue', marker='+', label = "VP5", s=700)
response = np.linspace(-1, 2, 4)
plt.xlabel("experiment numbers", font_label)
plt.ylabel("different Map-Replies", font_label)
# plt.title("Map Replies over time for EID-153.16.47.16 from MR-198.6.255.37 in 5 VPs", fontsize=20)
plt.xlim(0,798)
# plt.xlim(550, 600)
plt.ylim(-2, 3)
plt.xticks(fontsize=45, fontname='Times New Roman')
plt.yticks(response, ('Negative\nMap-Reply', 'No Map-\nReply', 'RLOC 1', 'RLOC 2'), fontsize=45, fontname='Times New Roman')
# loc=1 makes legend locating at right-up;
# loc=2 makes legend locating at left-up;
# loc=3 makes legend locating at left-down
# loc=4 makes legend locating at right-down
# Just have one point in legend
mpl.rc('legend', scatterpoints=1)
mpl.rc('legend', fontsize=45)
mpl.rc('legend', markerscale=1.5)
plt.legend(loc=4)
plt.savefig(
os.path.join(PLOT_DIR, 'Plot_newSize', 'Plot_variable_VP_different_RLOCs.eps'), dpi=300, transparent=True)
# plt.show() | gpl-2.0 | -854,535,159,552,120,600 | 28.926174 | 124 | 0.606999 | false |
xsixing/blaze | blaze/io/sql/ops.py | 1 | 3865 | """SQL implementations of element-wise ufuncs."""
from __future__ import absolute_import, division, print_function
from ...compute.function import function, kernel
from ...compute.ops import ufuncs
from .kernel import sql_kernel, SQL
from .syntax import Call, Expr, QOrderBy, QWhere, And, Or, Not
def sqlfunction(signature):
def decorator(f):
blaze_func = function(signature)(f)
kernel(blaze_func, SQL, f, signature)
return blaze_func
return decorator
def define_unop(signature, name, op):
"""Define a unary sql operator"""
def unop(x):
return Expr([op, x])
unop.__name__ = name
_implement(unop, signature)
return unop
def define_binop(signature, name, op):
"""Define a binary sql operator"""
def binop(a, b):
return Expr([a, op, b])
binop.__name__ = name
_implement(binop, signature)
return binop
def _implement(f, signature):
name = f.__name__
blaze_func = getattr(ufuncs, name)
#print("implement", f, signature, blaze_func)
sql_kernel(blaze_func, f, signature)
# Arithmetic
add = define_binop("(A... * T, A... * T) -> A... * T", "add", "+")
multiply = define_binop("(A... * T, A... * T) -> A... * T", "multiply", "*")
subtract = define_binop("(A... * T, A... * T) -> A... * T", "subtract", "-")
floordiv = define_binop("(A... * T, A... * T) -> A... * T", "floor_divide", "/")
divide = define_binop("(A... * T, A... * T) -> A... * T", "divide", "/")
truediv = define_binop("(A... * T, A... * T) -> A... * T", "true_divide", "/")
mod = define_binop("(A... * T, A... * T) -> A... * T", "mod", "%")
negative = define_unop("(A... * T) -> A... * T", "negative", "-")
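# Illustrative note (not executed here): each of the operators above is a
# small constructor for the SQL expression tree, e.g.
#
#     add(col, 1)     -> Expr([col, '+', 1])
#     negative(col)   -> Expr(['-', col])
#
# while _implement() registers the same function as the SQL kernel of the
# corresponding blaze ufunc.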
# Compare
eq = define_binop("(A... * T, A... * T) -> A... * bool", "equal", "==")
ne = define_binop("(A... * T, A... * T) -> A... * bool", "not_equal", "!=")
lt = define_binop("(A... * T, A... * T) -> A... * bool", "less", "<")
le = define_binop("(A... * T, A... * T) -> A... * bool", "less_equal", "<=")
gt = define_binop("(A... * T, A... * T) -> A... * bool", "greater", ">")
ge = define_binop("(A... * T, A... * T) -> A... * bool", "greater_equal", ">=")
# Logical
logical_and = define_binop("(A... * bool, A... * bool) -> A... * bool",
"logical_and", "AND")
logical_or = define_binop("(A... * bool, A... * bool) -> A... * bool",
"logical_or", "OR")
logical_not = define_unop("(A... * bool) -> A... * bool", "logical_not", "NOT")
def logical_xor(a, b):
# Potential exponential code generation...
return And(Or(a, b), Not(And(a, b)))
kernel(ufuncs.logical_xor, SQL, logical_xor,
"(A... * bool, A... * bool) -> A... * bool")
# SQL Functions
@sqlfunction('(A * DType) -> DType')
def sum(col):
return Call('SUM', [col])
@sqlfunction('(A * DType) -> DType')
def avg(col):
return Call('AVG', [col])
@sqlfunction('(A * DType) -> DType')
def min(col):
return Call('MIN', [col])
@sqlfunction('(A * DType) -> DType')
def max(col):
return Call('MAX', [col])
# SQL Join, Where, Group by, Order by
def merge(left, right, how='left', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=True):
"""
Join two tables.
"""
raise NotImplementedError
def index(col, index, order=None):
"""
Index a table or column with a predicate.
view = merge(table1, table2)
result = view[table1.id == table2.id]
or
avg(table1.age[table1.state == 'TX'])
"""
result = sqlindex(col, index)
if order:
result = sqlorder(result, order)
return result
@sqlfunction('(A * S, A * B) -> var * S')
def sqlindex(col, where):
return QWhere(col, where)
@sqlfunction('(A * S, A * B) -> A * S')
def sqlorder(col, by):
if not isinstance(by, (tuple, list)):
by = [by]
return QOrderBy(col, by)
| bsd-3-clause | -2,212,009,295,683,632,400 | 28.280303 | 80 | 0.542044 | false |
dbiesecke/dbiesecke.github.io | repo/script.module.urlresolver/lib/urlresolver/plugins/alldebrid.py | 1 | 14611 | """
urlresolver Kodi Addon
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from urllib import quote_plus
from urllib2 import HTTPError
import json
from urlresolver import common
from urlresolver.common import i18n
from urlresolver.resolver import UrlResolver, ResolverError
logger = common.log_utils.Logger.get_logger(__name__)
logger.disable()
AGENT = 'URLResolver for Kodi'
VERSION = common.addon_version
USER_AGENT = '%s/%s' % (AGENT, VERSION)
FORMATS = common.VIDEO_FORMATS
api_url = 'https://api.alldebrid.com'
class AllDebridResolver(UrlResolver):
name = "AllDebrid"
domains = ['*']
def __init__(self):
self.net = common.Net()
self.hosters = None
self.hosts = None
self.headers = {'User-Agent': USER_AGENT}
def get_media_url(self, host, media_id, cached_only=False):
try:
if media_id.lower().startswith('magnet:'):
r = re.search('''magnet:.+?urn:([a-zA-Z0-9]+):([a-zA-Z0-9]+)''', media_id, re.I)
if r:
_hash, _format = r.group(2), r.group(1)
if self.__check_cache(_hash):
logger.log_debug('AllDebrid: BTIH %s is readily available to stream' % _hash)
transfer_id = self.__create_transfer(_hash)
else:
if self.get_setting('cached_only') == 'true' or cached_only:
raise ResolverError('AllDebrid: Cached torrents only allowed to be initiated')
else:
transfer_id = self.__create_transfer(_hash)
self.__initiate_transfer(transfer_id)
transfer_info = self.__list_transfer(transfer_id)
for _link, _file in transfer_info.get('links').items():
if any(_file.lower().endswith(x) for x in FORMATS):
media_id = _link.replace("\/", "/")
break
self.__delete_transfer(transfer_id)
url = '%s/link/unlock?agent=%s&version=%s&token=%s&link=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), media_id)
result = self.net.http_GET(url, headers=self.headers).content
except HTTPError as e:
try:
js_result = json.loads(e.read())
if 'error' in js_result:
msg = '%s (%s)' % (js_result.get('error'), js_result.get('errorCode'))
else:
msg = 'Unknown Error (1)'
except:
msg = 'Unknown Error (2)'
raise ResolverError('AllDebrid Error: %s (%s)' % (msg, e.code))
else:
js_result = json.loads(result)
logger.log_debug('AllDebrid resolve: [%s]' % js_result)
if 'error' in js_result:
raise ResolverError('AllDebrid Error: %s (%s)' % (js_result.get('error'), js_result.get('errorCode')))
elif js_result.get('success', False):
if js_result.get('infos').get('link'):
return js_result.get('infos').get('link')
raise ResolverError('AllDebrid: no stream returned')
def __check_cache(self, media_id):
try:
url = '%s/magnet/instant?agent=%s&version=%s&token=%s&magnet=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), media_id)
result = self.net.http_GET(url, headers=self.headers).content
result = json.loads(result)
if result.get('success', False):
response = result.get('instant', False)
return response
except:
pass
return False
def __list_transfer(self, transfer_id):
try:
url = '%s/magnet/status?agent=%s&version=%s&token=%s&id=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), transfer_id)
response = self.net.http_GET(url, headers=self.headers).content
result = json.loads(response)
if result.get('success', False):
return result
except:
pass
return {}
def __create_transfer(self, media_id):
try:
url = '%s/magnet/upload?agent=%s&version=%s&token=%s&magnet=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), media_id)
response = self.net.http_GET(url, headers=self.headers).content
result = json.loads(response)
if result.get('success', False):
logger.log_debug('Transfer successfully started to the AllDebrid cloud')
return result.get('id', "")
except:
pass
return ""
def __initiate_transfer(self, transfer_id, interval=5):
try:
transfer_info = self.__list_transfer(transfer_id)
if transfer_info:
line1 = transfer_info.get('filename')
line2 = 'Saving torrent to UptoBox via AllDebrid'
line3 = transfer_info.get('status')
with common.kodi.ProgressDialog('Resolve URL AllDebrid Transfer', line1, line2, line3) as pd:
while not transfer_info.get('statusCode') == 4:
common.kodi.sleep(1000 * interval)
transfer_info = self.__list_transfer(transfer_id)
file_size = transfer_info.get('size')
line1 = transfer_info.get('filename')
if transfer_info.get('statusCode') == 1:
download_speed = round(float(transfer_info.get('downloadSpeed')) / (1000**2), 2)
progress = int(float(transfer_info.get('downloaded')) / file_size * 100) if file_size > 0 else 0
line3 = "Downloading at %s MB/s from %s peers, %s%% of %sGB completed" % (download_speed, transfer_info.get('seeders'), progress, round(float(file_size) / (1000 ** 3), 2))
elif transfer_info.get('statusCode') == 3:
upload_speed = round(float(transfer_info.get('uploadSpeed')) / (1000 ** 2), 2)
progress = int(float(transfer_info.get('uploaded')) / file_size * 100) if file_size > 0 else 0
line3 = "Uploading at %s MB/s, %s%% of %s GB completed" % (upload_speed, progress, round(float(file_size) / (1000 ** 3), 2))
else:
line3 = transfer_info.get('status')
progress = 0
logger.log_debug(line3)
pd.update(progress, line1=line1, line3=line3)
if pd.is_canceled():
self.__delete_transfer(transfer_id)
# self.__delete_folder()
raise ResolverError('Transfer ID %s :: Canceled by user' % transfer_id)
elif 5 <= transfer_info.get('statusCode') <= 10:
self.__delete_transfer(transfer_id)
# self.__delete_folder()
raise ResolverError('Transfer ID %s :: %s' % (transfer_id, transfer_info.get('status')))
common.kodi.sleep(1000 * interval) # allow api time to generate the links
return
except Exception as e:
self.__delete_transfer(transfer_id)
raise ResolverError('Transfer ID %s :: %s' % (transfer_id, e))
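    # Note on the status codes used above (inferred from this method, not from
    # official AllDebrid documentation): 1 appears to mean the magnet is still
    # downloading, 3 that it is uploading to the cloud, 4 that it is ready to
    # stream, and 5-10 are treated as failure states that abort the transfer.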
def __delete_transfer(self, transfer_id):
try:
url = '%s/magnet/delete?agent=%s&version=%s&token=%s&id=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), transfer_id)
response = self.net.http_GET(url, headers=self.headers).content
result = json.loads(response)
if result.get('success', False):
logger.log_debug('Transfer ID "%s" deleted from the AllDebrid cloud' % transfer_id)
return True
except:
pass
return False
def get_url(self, host, media_id):
return media_id
def get_host_and_id(self, url):
return 'www.alldebrid.com', url
@common.cache.cache_method(cache_limit=8)
def get_all_hosters(self):
hosters = []
url = '%s/user/hosts?agent=%s&version=%s&token=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'))
try:
js_result = self.net.http_GET(url, headers=self.headers).content
js_data = json.loads(js_result)
if js_data.get('success', False):
regexes = [value.get('regexp').replace('\/', '/') for key, value in js_data.get('hosts', {}).iteritems()
if value.get('status', False)]
logger.log_debug('AllDebrid hosters : %s' % regexes)
hosters = [re.compile(regex) for regex in regexes]
else:
logger.log_error('Error getting AD Hosters')
except Exception as e:
logger.log_error('Error getting AD Hosters: %s' % e)
return hosters
@common.cache.cache_method(cache_limit=8)
def get_hosts(self):
hosts = []
url = '%s/hosts/domains' % api_url
try:
js_result = self.net.http_GET(url, headers=self.headers).content
js_data = json.loads(js_result)
if js_data.get('success', False):
hosts = [host.replace('www.', '') for host in js_data.get('hosts', [])]
if self.get_setting('torrents') == 'true':
hosts.extend([u'torrent', u'magnet'])
logger.log_debug('AllDebrid hosts : %s' % hosts)
else:
logger.log_error('Error getting AD Hosters')
except Exception as e:
logger.log_error('Error getting AD Hosts: %s' % e)
return hosts
def valid_url(self, url, host):
logger.log_debug('in valid_url %s : %s' % (url, host))
if url:
if url.lower().startswith('magnet:') and self.get_setting('torrents') == 'true':
return True
if self.hosters is None:
self.hosters = self.get_all_hosters()
for regexp in self.hosters:
# logger.log_debug('AllDebrid checking host : %s' %str(regexp))
if re.search(regexp, url):
logger.log_debug('AllDebrid Match found')
return True
elif host:
if self.hosts is None:
self.hosts = self.get_hosts()
if any(host in item for item in self.hosts):
return True
return False
# SiteAuth methods
def login(self):
if not self.get_setting('token'):
self.authorize_resolver()
def reset_authorization(self):
self.set_setting('token', '')
def authorize_resolver(self):
url = '%s/pin/get?agent=%s&version=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION))
js_result = self.net.http_GET(url, headers=self.headers).content
js_data = json.loads(js_result)
line1 = 'Go to URL: %s' % (js_data.get('base_url').replace('\/', '/'))
line2 = 'When prompted enter: %s' % (js_data.get('pin'))
with common.kodi.CountdownDialog('Resolve Url All Debrid Authorization', line1, line2,
countdown=js_data.get('expired_in', 120)) as cd:
result = cd.start(self.__check_auth, [js_data.get('check_url').replace('\/', '/')])
# cancelled
if result is None:
return
return self.__get_token(js_data.get('check_url').replace('\/', '/'))
def __get_token(self, url):
try:
js_result = self.net.http_GET(url, headers=self.headers).content
js_data = json.loads(js_result)
if js_data.get("success", False):
token = js_data.get('token', '')
logger.log_debug('Authorizing All Debrid Result: |%s|' % token)
self.set_setting('token', token)
return True
except Exception as e:
logger.log_debug('All Debrid Authorization Failed: %s' % e)
return False
def __check_auth(self, url):
activated = False
try:
js_result = self.net.http_GET(url, headers=self.headers).content
js_data = json.loads(js_result)
if js_data.get("success", False):
activated = js_data.get('activated', False)
except Exception as e:
logger.log_debug('Exception during AD auth: %s' % e)
return activated
@classmethod
def get_settings_xml(cls):
xml = super(cls, cls).get_settings_xml()
# xml.append('<setting id="%s_autopick" type="bool" label="%s" default="false"/>' % (cls.__name__, i18n('auto_primary_link')))
xml.append('<setting id="%s_torrents" type="bool" label="%s" default="true"/>' % (cls.__name__, i18n('torrents')))
xml.append('<setting id="%s_cached_only" enable="eq(-1,true)" type="bool" label="%s" default="false" />' % (cls.__name__, i18n('cached_only')))
xml.append('<setting id="%s_auth" type="action" label="%s" action="RunPlugin(plugin://script.module.urlresolver/?mode=auth_ad)"/>' % (cls.__name__, i18n('auth_my_account')))
xml.append('<setting id="%s_reset" type="action" label="%s" action="RunPlugin(plugin://script.module.urlresolver/?mode=reset_ad)"/>' % (cls.__name__, i18n('reset_my_auth')))
xml.append('<setting id="%s_token" visible="false" type="text" default=""/>' % cls.__name__)
return xml
@classmethod
def _is_enabled(cls):
return cls.get_setting('enabled') == 'true' and cls.get_setting('token')
@classmethod
def isUniversal(self):
return True
| mit | 5,094,376,378,971,055,000 | 45.237342 | 199 | 0.549038 | false |
ireapps/census | dataprocessing/load_sf_data_2010.py | 1 | 1395 | #!/usr/bin/env python
import sys
from csvkit.unicsv import UnicodeCSVReader
from pymongo import objectid
import config
import utils
if len(sys.argv) < 2:
sys.exit('You must provide the filename of a CSV as an argument to this script.')
FILENAME = sys.argv[1]
YEAR = '2010'
collection = utils.get_geography_collection()
with open(FILENAME) as f:
rows = UnicodeCSVReader(f)
headers = rows.next()
updates = 0
row_count = 0
for row in rows:
row_count += 1
row_dict = dict(zip(headers, row))
xref = utils.xref_from_row_dict(row_dict)
geography = utils.find_geography_by_xref(collection, xref, fields=['data'])
if not geography:
continue
if YEAR not in geography['data']:
geography['data'][YEAR] = {}
tables = {}
for k, v in row_dict.items():
# Format table names to match labels
t = utils.parse_table_from_key(k)
if t not in tables:
tables[t] = {}
tables[t][k] = v
for k, v in tables.items():
geography['data'][YEAR][k] = v
collection.update({ '_id': objectid.ObjectId(geography['_id']) }, { '$set': { 'data': geography['data'] } }, safe=True)
updates += 1
print "File: %s" % FILENAME
print ' Row count: %i' % row_count
print ' Updated: %i' % updates
| mit | -8,988,254,867,123,021,000 | 22.25 | 127 | 0.573477 | false |
Joergen/zamboni | mkt/reviewers/helpers.py | 1 | 6578 | import datetime
import urlparse
from django.utils.encoding import smart_str
import jinja2
import waffle
from jingo import register
from tower import ugettext as _, ugettext_lazy as _lazy
from access import acl
from amo.helpers import impala_breadcrumbs
from amo.urlresolvers import reverse
from mkt.developers.helpers import mkt_page_title
from mkt.reviewers.utils import (AppsReviewing, clean_sort_param,
create_sort_link, device_queue_search)
@register.function
@jinja2.contextfunction
def reviewers_breadcrumbs(context, queue=None, items=None):
"""
    Wrapper function for ``breadcrumbs``. Prepends a 'Reviewer Tools'
    breadcrumb.
**queue**
Explicit queue type to set.
**items**
list of [(url, label)] to be inserted after Add-on.
"""
crumbs = [(reverse('reviewers.home'), _('Reviewer Tools'))]
if queue:
queues = {'pending': _('Apps'),
'rereview': _('Re-reviews'),
'updates': _('Updates'),
'escalated': _('Escalations'),
'device': _('Device'),
'moderated': _('Moderated Reviews'),
'reviewing': _('Reviewing'),
'pending_themes': _('Pending Themes'),
'flagged_themes': _('Flagged Themes'),
'rereview_themes': _('Update Themes')}
if items:
url = reverse('reviewers.apps.queue_%s' % queue)
else:
# The Addon is the end of the trail.
url = None
crumbs.append((url, queues[queue]))
if items:
crumbs.extend(items)
return impala_breadcrumbs(context, crumbs, add_default=True)
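# Hedged template usage sketch (the template name is hypothetical):
#
#     {# queue.html #}
#     {{ reviewers_breadcrumbs(queue='pending') }}
#
# i.e. the helper is called from Jinja templates via the @register.function
# decorator; passing 'items' appends extra (url, label) crumbs after the
# queue entry.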
@register.function
@jinja2.contextfunction
def reviewers_page_title(context, title=None, addon=None):
if addon:
title = u'%s | %s' % (title, addon.name)
else:
section = _lazy('Reviewer Tools')
title = u'%s | %s' % (title, section) if title else section
return mkt_page_title(context, title)
@register.function
@jinja2.contextfunction
def queue_tabnav(context):
"""
Returns tuple of tab navigation for the queue pages.
    Each tuple contains three elements: (named_url, tab_code, tab_text)
"""
request = context['request']
counts = context['queue_counts']
apps_reviewing = AppsReviewing(request).get_apps()
# Apps.
if acl.action_allowed(request, 'Apps', 'Review'):
rv = [
('reviewers.apps.queue_pending', 'pending',
_('Apps ({0})', counts['pending']).format(counts['pending'])),
('reviewers.apps.queue_rereview', 'rereview',
_('Re-reviews ({0})', counts['rereview']).format(
counts['rereview'])),
('reviewers.apps.queue_updates', 'updates',
_('Updates ({0})', counts['updates']).format(counts['updates'])),
]
if acl.action_allowed(request, 'Apps', 'ReviewEscalated'):
rv.append(('reviewers.apps.queue_escalated', 'escalated',
_('Escalations ({0})', counts['escalated']).format(
counts['escalated'])))
rv.extend([
('reviewers.apps.queue_moderated', 'moderated',
_('Moderated Reviews ({0})', counts['moderated'])
.format(counts['moderated'])),
('reviewers.apps.apps_reviewing', 'reviewing',
_('Reviewing ({0})').format(len(apps_reviewing))),
])
else:
rv = []
if waffle.switch_is_active('buchets') and 'pro' in request.GET:
device_srch = device_queue_search(request)
rv.append(('reviewers.apps.queue_device', 'device',
_('Device ({0})').format(device_srch.count()),))
return rv
@register.function
@jinja2.contextfunction
def logs_tabnav(context):
"""
Returns tuple of tab navigation for the log pages.
Each tuple contains three elements: (named url, tab_code, tab_text)
"""
rv = [
('reviewers.apps.logs', 'apps', _('Reviews'))
]
return rv
@register.function
@jinja2.contextfunction
def logs_tabnav_themes(context):
"""
Returns tuple of tab navigation for the log pages.
Each tuple contains three elements: (named url, tab_code, tab_text)
"""
rv = [
('reviewers.themes.logs', 'themes', _('Reviews'))
]
if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'):
rv.append(('reviewers.themes.deleted', 'deleted', _('Deleted')))
return rv
@register.function
@jinja2.contextfunction
def queue_tabnav_themes(context):
"""Similar to queue_tabnav, but for themes."""
tabs = []
if acl.action_allowed(context['request'], 'Personas', 'Review'):
tabs.append((
'reviewers.themes.list', 'pending_themes', _('Pending'),
))
if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'):
tabs.append((
'reviewers.themes.list_flagged', 'flagged_themes', _('Flagged'),
))
tabs.append((
'reviewers.themes.list_rereview', 'rereview_themes',
_('Updates'),
))
return tabs
@register.function
@jinja2.contextfunction
def queue_tabnav_themes_interactive(context):
"""Tabnav for the interactive shiny theme queues."""
tabs = []
if acl.action_allowed(context['request'], 'Personas', 'Review'):
tabs.append((
'reviewers.themes.queue_themes', 'pending', _('Pending'),
))
if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'):
tabs.append((
'reviewers.themes.queue_flagged', 'flagged', _('Flagged'),
))
tabs.append((
'reviewers.themes.queue_rereview', 'rereview', _('Updates'),
))
return tabs
@register.function
@jinja2.contextfunction
def sort_link(context, pretty_name, sort_field):
"""Get table header sort links.
pretty_name -- name displayed on table header
    sort_field -- name of the GET parameter, referenced in views
"""
request = context['request']
sort, order = clean_sort_param(request)
# Copy search/filter GET parameters.
get_params = [(k, v) for k, v in
urlparse.parse_qsl(smart_str(request.META['QUERY_STRING']))
if k not in ('sort', 'order')]
return create_sort_link(pretty_name, sort_field, get_params,
sort, order)
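# Hedged template usage sketch (the column names are hypothetical):
#
#     <th>{{ sort_link(_('Name'), 'name') }}</th>
#
# The helper re-emits the current query string minus 'sort'/'order', so the
# generated link can toggle ordering while preserving any active filters.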
@register.function
@jinja2.contextfunction
def is_expired_lock(context, lock):
return lock.expiry < datetime.datetime.now()
| bsd-3-clause | -5,529,736,335,256,113,000 | 30.028302 | 78 | 0.59471 | false |
asterix135/infonex_crm | crm/tests.py | 1 | 5877 | import datetime
from django.utils import timezone
from django.test import TestCase
from .models import Person
from django.core.urlresolvers import reverse
class PersonMethodTests(TestCase):
def test_was_added_recently_with_future_created_date(self):
"""
was_added_recently() should return False for persons whose created_date
is in the future
"""
time = timezone.now() + datetime.timedelta(days=30)
future_person = Person(created_date=time)
self.assertEqual(future_person.was_added_recently(), False)
def test_was_modified_recently_with_future_last_modified(self):
"""
was_modified_recently() should return False for persons whose
last_modified is in the future
"""
time = timezone.now() + datetime.timedelta(days=30)
future_person = Person(last_modified=time)
self.assertEqual(future_person.was_modified_recently(), False)
def test_was_added_recently_with_old_added_date(self):
"""
was_added_recently() should return False for persons whose created_date
is older than 14 days
"""
time = timezone.now() - datetime.timedelta(days=30)
old_person = Person(created_date=time)
self.assertEqual(old_person.was_added_recently(), False)
def test_was_modified_recently_with_old_last_modified(self):
"""
was_modified_recently() should return False for persons whose
last_modified is older than 14 days
"""
time = timezone.now() - datetime.timedelta(days=30)
old_person = Person(last_modified=time)
self.assertEqual(old_person.was_modified_recently(), False)
def test_was_added_recently_with_recent_added_date(self):
"""
was_added_recently() should return True for persons whose created_date
is within 14 days
"""
time = timezone.now() - datetime.timedelta(days=10)
recent_person = Person(created_date=time)
self.assertEqual(recent_person.was_added_recently(), True)
def test_was_modified_recently_with_recent_added_date(self):
"""
        was_modified_recently() should return True for persons whose
last_modified is within 14 days
"""
time = timezone.now() - datetime.timedelta(days=10)
recent_person = Person(last_modified=time)
self.assertEqual(recent_person.was_modified_recently(), True)
def create_person(person_fname, person_lname, person_co, days):
"""
    Creates a person with the given first name, last name & company, created
    and modified the given number of 'days' offset from now (negative for past
    created, positive for future created)
:param person_fname: string
:param person_lname: string
:param person_co: string
:param days: integer
"""
time = timezone.now() + datetime.timedelta(days=days)
return Person.objects.create(first_name=person_fname,
last_name=person_lname,
company=person_co,
created_date=time,
last_modified=time)
class PersonViewTests(TestCase):
def test_index_view_with_no_people(self):
"""
If no persons exist, an appropriate message should be displayed.
"""
response = self.client.get(reverse('crm:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No people available.')
self.assertQuerysetEqual(response.context['latest_person_list'], [])
def test_index_with_a_past_person(self):
"""
Persons with a created date in the past should be displayed on the
index page.
"""
create_person(person_fname='First', person_lname='Last',
person_co="Company", days=-10)
response = self.client.get(reverse('crm:index'))
self.assertQuerysetEqual(
response.context['latest_person_list'],
['<Person: First Last, Company>']
)
def test_index_view_with_a_future_person(self):
"""
Persons with a creation date in the future should not be displayed on
the index page.
"""
create_person(person_fname='First', person_lname='Last',
person_co="Company", days=10)
response = self.client.get(reverse('crm:index'))
self.assertContains(response, 'No people available.',
status_code=200)
self.assertQuerysetEqual(
response.context['latest_person_list'], [])
def test_index_view_with_future_and_past_person(self):
"""
Even if past and future-dated persons exist, only past persons should
be displayed.
"""
create_person(person_fname='First1', person_lname='Last1',
person_co="Company1", days=-10)
create_person(person_fname='First2', person_lname='Last2',
person_co="Company2", days=10)
response = self.client.get(reverse('crm:index'))
self.assertQuerysetEqual(
response.context['latest_person_list'],
['<Person: First1 Last1, Company1>']
)
def test_index_view_with_two_past_people(self):
"""
The persons index page may display multiple people
"""
create_person(person_fname='First1', person_lname='Last1',
person_co="Company1", days=-10)
create_person(person_fname='First2', person_lname='Last2',
person_co="Company2", days=-5)
response = self.client.get(reverse('crm:index'))
self.assertQuerysetEqual(
response.context['latest_person_list'],
['<Person: First2 Last2, Company2>',
'<Person: First1 Last1, Company1>']
)
| mit | -7,130,008,750,136,140,000 | 38.979592 | 79 | 0.614769 | false |
dstroppa/openstack-smartos-nova-grizzly | nova/tests/integrated/test_api_samples.py | 1 | 149397 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import copy
import datetime
import inspect
import json
import os
import re
import urllib
import uuid as uuid_lib
import coverage
from lxml import etree
from oslo.config import cfg
from nova.api.metadata import password
from nova.api.openstack.compute.contrib import coverage_ext
from nova.api.openstack.compute.contrib import fping
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.cloudpipe import pipelib
from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.network import api as network_api
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
import nova.quota
from nova.scheduler import driver
from nova.servicegroup import api as service_group_api
from nova import test
from nova.tests.api.openstack.compute.contrib import test_coverage_ext
from nova.tests.api.openstack.compute.contrib import test_fping
from nova.tests.api.openstack.compute.contrib import test_networks
from nova.tests.api.openstack.compute.contrib import test_services
from nova.tests.api.openstack import fakes
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests import fake_instance_actions
from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests.integrated import integrated_helpers
from nova.tests import utils as test_utils
from nova import utils
from nova.volume import cinder
CONF = cfg.CONF
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells')
LOG = logging.getLogger(__name__)
class NoMatch(test.TestingException):
pass
class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
ctype = 'json'
all_extensions = False
extension_name = None
def setUp(self):
self.flags(use_ipv6=False,
osapi_compute_link_prefix=self._get_host(),
osapi_glance_link_prefix=self._get_glance_host())
if not self.all_extensions:
ext = [self.extension_name] if self.extension_name else []
self.flags(osapi_compute_extension=ext)
super(ApiSampleTestBase, self).setUp()
fake_network.stub_compute_with_ips(self.stubs)
self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
def _pretty_data(self, data):
if self.ctype == 'json':
data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
indent=4)
else:
if data is None:
# Likely from missing XML file.
return ""
xml = etree.XML(data)
data = etree.tostring(xml, encoding="UTF-8",
xml_declaration=True, pretty_print=True)
return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
def _objectify(self, data):
if not data:
return {}
if self.ctype == 'json':
# NOTE(vish): allow non-quoted replacements to survive json
data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
return jsonutils.loads(data)
else:
def to_dict(node):
ret = {}
if node.items():
ret.update(dict(node.items()))
if node.text:
ret['__content__'] = node.text
if node.tag:
ret['__tag__'] = node.tag
if node.nsmap:
ret['__nsmap__'] = node.nsmap
for element in node:
ret.setdefault(node.tag, [])
ret[node.tag].append(to_dict(element))
return ret
return to_dict(etree.fromstring(data))
@classmethod
def _get_sample_path(cls, name, dirname, suffix=''):
parts = [dirname]
parts.append('api_samples')
if cls.all_extensions:
parts.append('all_extensions')
if cls.extension_name:
alias = importutils.import_class(cls.extension_name).alias
parts.append(alias)
parts.append(name + "." + cls.ctype + suffix)
return os.path.join(*parts)
@classmethod
def _get_sample(cls, name):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.join(dirname, "../../../doc")
return cls._get_sample_path(name, dirname)
@classmethod
def _get_template(cls, name):
dirname = os.path.dirname(os.path.abspath(__file__))
return cls._get_sample_path(name, dirname, suffix='.tpl')
def _read_template(self, name):
template = self._get_template(name)
with open(template) as inf:
return inf.read().strip()
def _write_template(self, name, data):
with open(self._get_template(name), 'w') as outf:
outf.write(data)
def _write_sample(self, name, data):
with open(self._get_sample(name), 'w') as outf:
outf.write(data)
def _compare_result(self, subs, expected, result, result_str):
matched_value = None
if isinstance(expected, dict):
if not isinstance(result, dict):
raise NoMatch(_('%(result_str)s: %(result)s is not a dict.')
% locals())
ex_keys = sorted(expected.keys())
res_keys = sorted(result.keys())
if ex_keys != res_keys:
ex_delta = []
res_delta = []
for key in ex_keys:
if key not in res_keys:
ex_delta.append(key)
for key in res_keys:
if key not in ex_keys:
res_delta.append(key)
raise NoMatch(
_('Dictionary key mismatch:\n'
'Extra key(s) in template:\n%(ex_delta)s\n'
'Extra key(s) in %(result_str)s:\n%(res_delta)s\n')
% locals())
for key in ex_keys:
res = self._compare_result(subs, expected[key], result[key],
result_str)
matched_value = res or matched_value
elif isinstance(expected, list):
if not isinstance(result, list):
raise NoMatch(
_('%(result_str)s: %(result)s is not a list.') % locals())
expected = expected[:]
extra = []
for res_obj in result:
for i, ex_obj in enumerate(expected):
try:
matched_value = self._compare_result(subs, ex_obj,
res_obj,
result_str)
del expected[i]
break
except NoMatch:
pass
else:
extra.append(res_obj)
error = []
if expected:
error.append(_('Extra list items in template:'))
error.extend([repr(o) for o in expected])
if extra:
error.append(_('Extra list items in %(result_str)s:')
% locals())
error.extend([repr(o) for o in extra])
if error:
raise NoMatch('\n'.join(error))
elif isinstance(expected, basestring) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
expected = expected.replace(char, '\\%s' % char)
# NOTE(vish): special handling of subs that are not quoted. We are
# expecting an int but we had to pass in a string
# so the json would parse properly.
if expected.startswith("%(int:"):
result = str(result)
expected = expected.replace('int:', '')
expected = expected % subs
expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
raise NoMatch(
_('Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: %(result)s')
% locals())
try:
matched_value = match.group('id')
except IndexError:
if match.groups():
matched_value = match.groups()[0]
else:
if isinstance(expected, basestring):
# NOTE(danms): Ignore whitespace in this comparison
expected = expected.strip()
result = result.strip()
if expected != result:
raise NoMatch(
_('Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: %(result)s')
% locals())
return matched_value
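    # Illustrative note: a template value such as "%(id)s" is expanded with the
    # regexes from _get_regexes() and matched against the response (the
    # (?P<id>...) named group is how a created resource id gets captured),
    # while "%(int:...)s" placeholders coerce the response value to a string
    # first so integer fields can be matched with a pattern.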
def generalize_subs(self, subs, vanilla_regexes):
"""Give the test a chance to modify subs after the server response
was verified, and before the on-disk doc/api_samples file is checked.
This may be needed by some tests to convert exact matches expected
from the server into pattern matches to verify what is in the
sample file.
If there are no changes to be made, subs is returned unharmed.
"""
return subs
def _verify_response(self, name, subs, response):
response_data = response.read()
response_data = self._pretty_data(response_data)
if not os.path.exists(self._get_template(name)):
self._write_template(name, response_data)
template_data = response_data
else:
template_data = self._read_template(name)
if (self.generate_samples and
not os.path.exists(self._get_sample(name))):
self._write_sample(name, response_data)
sample_data = response_data
else:
with file(self._get_sample(name)) as sample:
sample_data = sample.read()
try:
template_data = self._objectify(template_data)
response_data = self._objectify(response_data)
response_result = self._compare_result(subs, template_data,
response_data, "Response")
# NOTE(danms): replace some of the subs with patterns for the
# doc/api_samples check, which won't have things like the
# correct compute host name. Also let the test do some of its
# own generalization, if necessary
vanilla_regexes = self._get_regexes()
subs['compute_host'] = vanilla_regexes['host_name']
subs['id'] = vanilla_regexes['id']
subs = self.generalize_subs(subs, vanilla_regexes)
sample_data = self._objectify(sample_data)
self._compare_result(subs, template_data, sample_data, "Sample")
return response_result
except NoMatch:
raise
def _get_host(self):
return 'http://openstack.example.com'
def _get_glance_host(self):
return 'http://glance.openstack.example.com'
def _get_regexes(self):
if self.ctype == 'json':
text = r'(\\"|[^"])*'
else:
text = r'[^<]*'
return {
# NOTE(treinish): Could result in a false positive, but it
# shouldn't be an issue for this case.
'timestamp': '\d{4}-[0,1]\d-[0-3]\d[ ,T]'
'\d{2}:\d{2}:\d{2}'
'(Z|(\+|-)\d{2}:\d{2}|\.\d{6}|)',
'password': '[0-9a-zA-Z]{1,12}',
'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}',
'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12})',
'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}',
'reservation_id': 'r-[0-9a-zA-Z]{8}',
'private_key': '-----BEGIN RSA PRIVATE KEY-----'
'[a-zA-Z0-9\n/+=]*'
'-----END RSA PRIVATE KEY-----',
'public_key': 'ssh-rsa[ a-zA-Z0-9/+=]*'
'Generated by Nova',
'fingerprint': '([0-9a-f]{2}:){15}[0-9a-f]{2}',
# '[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:'
# '[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:'
# '[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:'
# '[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}',
'host': self._get_host(),
'host_name': '[0-9a-z]{32}',
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
'int': '[0-9]+',
}
def _get_response(self, url, method, body=None, strip_version=False):
headers = {}
headers['Content-Type'] = 'application/' + self.ctype
headers['Accept'] = 'application/' + self.ctype
return self.api.api_request(url, body=body, method=method,
headers=headers, strip_version=strip_version)
def _do_get(self, url, strip_version=False):
return self._get_response(url, 'GET', strip_version=strip_version)
def _do_post(self, url, name, subs, method='POST'):
body = self._read_template(name) % subs
sample = self._get_sample(name)
if self.generate_samples and not os.path.exists(sample):
self._write_sample(name, body)
return self._get_response(url, method, body)
def _do_put(self, url, name, subs):
return self._do_post(url, name, subs, method='PUT')
def _do_delete(self, url):
return self._get_response(url, 'DELETE')
class ApiSamplesTrap(ApiSampleTestBase):
"""Make sure extensions don't get added without tests."""
all_extensions = True
def _get_extensions_tested(self):
tests = []
for attr in globals().values():
if not inspect.isclass(attr):
continue # Skip non-class objects
if not issubclass(attr, integrated_helpers._IntegratedTestBase):
continue # Skip non-test classes
if attr.extension_name is None:
continue # Skip base tests
cls = importutils.import_class(attr.extension_name)
tests.append(cls.alias)
return tests
def _get_extensions(self):
extensions = []
response = self._do_get('extensions')
for extension in jsonutils.loads(response.read())['extensions']:
extensions.append(str(extension['alias']))
return extensions
def test_all_extensions_have_samples(self):
# NOTE(danms): This is a list of extensions which are currently
# in the tree but that don't (yet) have tests. This list should
# NOT be allowed to grow, and should shrink to zero (and be
# removed) soon.
do_not_approve_additions = []
do_not_approve_additions.append('os-create-server-ext')
tests = self._get_extensions_tested()
extensions = self._get_extensions()
missing_tests = []
for extension in extensions:
# NOTE(danms): if you add tests, remove it from the
# exclusions list
self.assertFalse(extension in do_not_approve_additions and
extension in tests)
# NOTE(danms): if you add an extension, it must come with
# api_samples tests!
if (extension not in tests and
extension not in do_not_approve_additions):
missing_tests.append(extension)
if missing_tests:
LOG.error("Extensions are missing tests: %s" % missing_tests)
self.assertEqual(missing_tests, [])
class VersionsSampleJsonTest(ApiSampleTestBase):
def test_versions_get(self):
response = self._do_get('', strip_version=True)
subs = self._get_regexes()
return self._verify_response('versions-get-resp', subs, response)
class VersionsSampleXmlTest(VersionsSampleJsonTest):
ctype = 'xml'
class ServersSampleBase(ApiSampleTestBase):
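# Boot a server from the sample request template and return the new
# server's uuid taken from the verified response.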
def _post_server(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
}
response = self._do_post('servers', 'server-post-req', subs)
self.assertEqual(response.status, 202)
subs = self._get_regexes()
return self._verify_response('server-post-resp', subs, response)
class ServersSampleJsonTest(ServersSampleBase):
def test_servers_post(self):
return self._post_server()
def test_servers_get(self):
uuid = self.test_servers_post()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['hypervisor_hostname'] = r'[\w\.\-]+'
return self._verify_response('server-get-resp', subs, response)
def test_servers_list(self):
uuid = self._post_server()
response = self._do_get('servers')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['id'] = uuid
return self._verify_response('servers-list-resp', subs, response)
def test_servers_details(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['hypervisor_hostname'] = r'[\w\.\-]+'
return self._verify_response('servers-details-resp', subs, response)
class ServersSampleXmlTest(ServersSampleJsonTest):
ctype = 'xml'
class ServersSampleAllExtensionJsonTest(ServersSampleJsonTest):
all_extensions = True
class ServersSampleAllExtensionXmlTest(ServersSampleXmlTest):
all_extensions = True
class ServersSampleHideAddressesJsonTest(ServersSampleJsonTest):
extension_name = '.'.join(('nova.api.openstack.compute.contrib',
'hide_server_addresses',
'Hide_server_addresses'))
class ServersSampleHideAddressesXMLTest(ServersSampleHideAddressesJsonTest):
ctype = 'xml'
class ServersMetadataJsonTest(ServersSampleBase):
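# Create a server, PUT the full metadata document on it, verify the
# response, and return the server uuid for the individual tests below.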
def _create_and_set(self, subs):
uuid = self._post_server()
response = self._do_put('servers/%s/metadata' % uuid,
'server-metadata-all-req',
subs)
self.assertEqual(response.status, 200)
self._verify_response('server-metadata-all-resp', subs, response)
return uuid
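# generalize_subs loosens the exact metadata value into a pattern that
# covers both the 'Foo Value' and 'Bar Value' requests in these tests.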
def generalize_subs(self, subs, vanilla_regexes):
subs['value'] = '(Foo|Bar) Value'
return subs
def test_metadata_put_all(self):
# Test setting all metadata for a server.
subs = {'value': 'Foo Value'}
return self._create_and_set(subs)
def test_metadata_post_all(self):
# Test updating all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
response = self._do_post('servers/%s/metadata' % uuid,
'server-metadata-all-req',
subs)
self.assertEqual(response.status, 200)
self._verify_response('server-metadata-all-resp', subs, response)
def test_metadata_get_all(self):
# Test getting all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata' % uuid)
self.assertEqual(response.status, 200)
self._verify_response('server-metadata-all-resp', subs, response)
def test_metadata_put(self):
# Test putting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
response = self._do_put('servers/%s/metadata/foo' % uuid,
'server-metadata-req',
subs)
self.assertEqual(response.status, 200)
return self._verify_response('server-metadata-resp', subs, response)
def test_metadata_get(self):
# Test getting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata/foo' % uuid)
self.assertEqual(response.status, 200)
return self._verify_response('server-metadata-resp', subs, response)
def test_metadata_delete(self):
# Test deleting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_delete('servers/%s/metadata/foo' % uuid)
self.assertEqual(response.status, 204)
self.assertEqual(response.read(), '')
class ServersMetadataXmlTest(ServersMetadataJsonTest):
ctype = 'xml'
class ServersIpsJsonTest(ServersSampleBase):
def test_get(self):
# Test getting a server's IP information.
uuid = self._post_server()
response = self._do_get('servers/%s/ips' % uuid)
subs = self._get_regexes()
return self._verify_response('server-ips-resp', subs, response)
def test_get_by_network(self):
# Test getting a server's IP information by network id.
uuid = self._post_server()
response = self._do_get('servers/%s/ips/private' % uuid)
subs = self._get_regexes()
return self._verify_response('server-ips-network-resp', subs, response)
class ServersIpsXmlTest(ServersIpsJsonTest):
ctype = 'xml'
class ExtensionsSampleJsonTest(ApiSampleTestBase):
all_extensions = True
def test_extensions_get(self):
response = self._do_get('extensions')
subs = self._get_regexes()
return self._verify_response('extensions-get-resp', subs, response)
class ExtensionsSampleXmlTest(ExtensionsSampleJsonTest):
ctype = 'xml'
class FlavorsSampleJsonTest(ApiSampleTestBase):
def test_flavors_get(self):
response = self._do_get('flavors/1')
subs = self._get_regexes()
return self._verify_response('flavor-get-resp', subs, response)
def test_flavors_list(self):
response = self._do_get('flavors')
subs = self._get_regexes()
return self._verify_response('flavors-list-resp', subs, response)
class FlavorsSampleXmlTest(FlavorsSampleJsonTest):
ctype = 'xml'
class HostsSampleJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.hosts.Hosts"
def test_host_startup(self):
response = self._do_get('os-hosts/%s/startup' % self.compute.host)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('host-get-startup', subs, response)
def test_host_reboot(self):
response = self._do_get('os-hosts/%s/reboot' % self.compute.host)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('host-get-reboot', subs, response)
def test_host_shutdown(self):
response = self._do_get('os-hosts/%s/shutdown' % self.compute.host)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('host-get-shutdown', subs, response)
def test_host_maintenance(self):
response = self._do_put('os-hosts/%s' % self.compute.host,
'host-put-maintenance-req', {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('host-put-maintenance-resp', subs,
response)
def test_host_get(self):
response = self._do_get('os-hosts/%s' % self.compute.host)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('host-get-resp', subs, response)
def test_hosts_list(self):
response = self._do_get('os-hosts')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('hosts-list-resp', subs, response)
class HostsSampleXmlTest(HostsSampleJsonTest):
ctype = 'xml'
class FlavorsSampleAllExtensionJsonTest(FlavorsSampleJsonTest):
all_extensions = True
class FlavorsSampleAllExtensionXmlTest(FlavorsSampleXmlTest):
all_extensions = True
class ImagesSampleJsonTest(ApiSampleTestBase):
def test_images_list(self):
# Get api sample of images get list request.
response = self._do_get('images')
subs = self._get_regexes()
return self._verify_response('images-list-get-resp', subs, response)
def test_image_get(self):
# Get api sample of one single image details request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s' % image_id)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['image_id'] = image_id
return self._verify_response('image-get-resp', subs, response)
def test_images_details(self):
# Get api sample of all images details request.
response = self._do_get('images/detail')
subs = self._get_regexes()
return self._verify_response('images-details-get-resp', subs, response)
def test_image_metadata_get(self):
# Get api sample of an image metadata request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s/metadata' % image_id)
subs = self._get_regexes()
subs['image_id'] = image_id
return self._verify_response('image-metadata-get-resp', subs, response)
def test_image_metadata_post(self):
# Get api sample of an image metadata update request.
image_id = fake.get_valid_image_id()
response = self._do_post(
'images/%s/metadata' % image_id,
'image-metadata-post-req', {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('image-metadata-post-resp',
subs, response)
def test_image_metadata_put(self):
# Get api sample of image metadata put request.
image_id = fake.get_valid_image_id()
response = self._do_put('images/%s/metadata' % image_id,
'image-metadata-put-req', {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('image-metadata-put-resp',
subs, response)
def test_image_meta_key_get(self):
# Get api sample of an image metadata key request.
image_id = fake.get_valid_image_id()
key = "kernel_id"
response = self._do_get('images/%s/metadata/%s' % (image_id, key))
subs = self._get_regexes()
return self._verify_response('image-meta-key-get', subs, response)
def test_image_meta_key_put(self):
# Get api sample of image metadata key put request.
image_id = fake.get_valid_image_id()
key = "auto_disk_config"
response = self._do_put('images/%s/metadata/%s' % (image_id, key),
'image-meta-key-put-req', {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('image-meta-key-put-resp',
subs,
response)
class ImagesSampleXmlTest(ImagesSampleJsonTest):
ctype = 'xml'
class LimitsSampleJsonTest(ApiSampleTestBase):
def test_limits_get(self):
response = self._do_get('limits')
subs = self._get_regexes()
return self._verify_response('limit-get-resp', subs, response)
class LimitsSampleXmlTest(LimitsSampleJsonTest):
ctype = 'xml'
class CoverageExtJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.coverage_ext."
"Coverage_ext")
def setUp(self):
super(CoverageExtJsonTests, self).setUp()
def _fake_check_coverage(self):
return False
def _fake_xml_report(self, outfile=None):
return
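# Stub out the coverage check and the coverage module itself so these
# requests never start a real coverage session.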
self.stubs.Set(coverage_ext.CoverageController, '_check_coverage',
_fake_check_coverage)
self.stubs.Set(coverage, 'coverage', test_coverage_ext.FakeCoverage)
def test_start_coverage(self):
# Start coverage data collection.
subs = {}
response = self._do_post('os-coverage/action',
'coverage-start-post-req', subs)
self.assertEqual(response.status, 200)
def test_start_coverage_combine(self):
# Start coverage data collection.
subs = {}
response = self._do_post('os-coverage/action',
'coverage-start-combine-post-req', subs)
self.assertEqual(response.status, 200)
def test_stop_coverage(self):
# Stop coverage data collection.
subs = {
'path': '/.*',
}
response = self._do_post('os-coverage/action',
'coverage-stop-post-req', subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('coverage-stop-post-resp',
subs, response)
def test_report_coverage(self):
# Generate a coverage report.
subs = {
'filename': 'report',
'path': '/.*/report',
}
response = self._do_post('os-coverage/action',
'coverage-report-post-req', subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('coverage-report-post-resp',
subs, response)
def test_xml_report_coverage(self):
subs = {
'filename': 'report',
'path': '/.*/report',
}
response = self._do_post('os-coverage/action',
'coverage-xml-report-post-req', subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('coverage-xml-report-post-resp',
subs, response)
class CoverageExtXmlTests(CoverageExtJsonTests):
ctype = "xml"
class ServersActionsJsonTest(ServersSampleBase):
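# POST the named action to the server and, when a response template is
# given, verify the body against it; otherwise expect an empty body.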
def _test_server_action(self, uuid, action,
subs={}, resp_tpl=None, code=202):
subs.update({'action': action})
response = self._do_post('servers/%s/action' % uuid,
'server-action-%s' % action.lower(),
subs)
self.assertEqual(response.status, code)
if resp_tpl:
subs.update(self._get_regexes())
return self._verify_response(resp_tpl, subs, response)
else:
self.assertEqual(response.read(), "")
def test_server_password(self):
uuid = self._post_server()
self._test_server_action(uuid, "changePassword",
{"password": "foo"})
def test_server_reboot(self):
uuid = self._post_server()
self._test_server_action(uuid, "reboot",
{"type": "HARD"})
self._test_server_action(uuid, "reboot",
{"type": "SOFT"})
def test_server_rebuild(self):
uuid = self._post_server()
image = self.api.get_images()[0]['id']
subs = {'host': self._get_host(),
'uuid': image,
'name': 'foobar',
'pass': 'seekr3t',
'ip': '1.2.3.4',
'ip6': 'fe80::100',
'hostid': '[a-f0-9]+',
}
self._test_server_action(uuid, 'rebuild', subs,
'server-action-rebuild-resp')
def test_server_resize(self):
self.flags(allow_resize_to_same_host=True)
uuid = self._post_server()
self._test_server_action(uuid, "resize",
{"id": 2,
"host": self._get_host()})
return uuid
def test_server_revert_resize(self):
uuid = self.test_server_resize()
self._test_server_action(uuid, "revertResize")
def test_server_confirm_resize(self):
uuid = self.test_server_resize()
self._test_server_action(uuid, "confirmResize", code=204)
def test_server_create_image(self):
uuid = self._post_server()
self._test_server_action(uuid, 'createImage',
{'name': 'foo-image',
'meta_var': 'myvar',
'meta_val': 'foobar'})
class ServersActionsXmlTest(ServersActionsJsonTest):
ctype = 'xml'
class ServersActionsAllJsonTest(ServersActionsJsonTest):
all_extensions = True
class ServersActionsAllXmlTest(ServersActionsXmlTest):
all_extensions = True
class ServerStartStopJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".server_start_stop.Server_start_stop"
def _test_server_action(self, uuid, action):
response = self._do_post('servers/%s/action' % uuid,
'server_start_stop',
{'action': action})
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), "")
def test_server_start(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-stop')
self._test_server_action(uuid, 'os-start')
def test_server_stop(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-stop')
class ServerStartStopXmlTest(ServerStartStopJsonTest):
ctype = 'xml'
class UserDataJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.user_data.User_data"
def test_user_data_post(self):
user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
user_data = base64.b64encode(user_data_contents)
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'user_data': user_data
}
response = self._do_post('servers', 'userdata-post-req', subs)
self.assertEqual(response.status, 202)
subs.update(self._get_regexes())
return self._verify_response('userdata-post-resp', subs, response)
class UserDataXmlTest(UserDataJsonTest):
ctype = 'xml'
class FlavorsExtraDataJsonTest(ApiSampleTestBase):
extension_name = ('nova.api.openstack.compute.contrib.flavorextradata.'
'Flavorextradata')
def _get_flags(self):
f = super(FlavorsExtraDataJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Flavorextradata extension also needs Flavormanage to be loaded.
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
return f
def test_flavors_extra_data_get(self):
flavor_id = 1
response = self._do_get('flavors/%s' % flavor_id)
self.assertEqual(response.status, 200)
subs = {
'flavor_id': flavor_id,
'flavor_name': 'm1.tiny'
}
subs.update(self._get_regexes())
return self._verify_response('flavors-extra-data-get-resp', subs,
response)
def test_flavors_extra_data_list(self):
response = self._do_get('flavors/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('flavors-extra-data-list-resp', subs,
response)
def test_flavors_extra_data_create(self):
subs = {
'flavor_id': 666,
'flavor_name': 'flavortest'
}
response = self._do_post('flavors',
'flavors-extra-data-post-req',
subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('flavors-extra-data-post-resp',
subs, response)
class FlavorsExtraDataXmlTest(FlavorsExtraDataJsonTest):
ctype = 'xml'
class FlavorRxtxJsonTest(ApiSampleTestBase):
extension_name = ('nova.api.openstack.compute.contrib.flavor_rxtx.'
'Flavor_rxtx')
def _get_flags(self):
f = super(FlavorRxtxJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# FlavorRxtx extension also needs Flavormanage to be loaded.
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
return f
def test_flavor_rxtx_get(self):
flavor_id = 1
response = self._do_get('flavors/%s' % flavor_id)
self.assertEqual(response.status, 200)
subs = {
'flavor_id': flavor_id,
'flavor_name': 'm1.tiny'
}
subs.update(self._get_regexes())
return self._verify_response('flavor-rxtx-get-resp', subs,
response)
def test_flavors_rxtx_list(self):
response = self._do_get('flavors/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('flavor-rxtx-list-resp', subs,
response)
def test_flavors_rxtx_create(self):
subs = {
'flavor_id': 100,
'flavor_name': 'flavortest'
}
response = self._do_post('flavors',
'flavor-rxtx-post-req',
subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('flavor-rxtx-post-resp',
subs, response)
class FlavorRxtxXmlTest(FlavorRxtxJsonTest):
ctype = 'xml'
class FlavorSwapJsonTest(ApiSampleTestBase):
extension_name = ('nova.api.openstack.compute.contrib.flavor_swap.'
'Flavor_swap')
def _get_flags(self):
f = super(FlavorSwapJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# FlavorSwap extension also needs Flavormanage to be loaded.
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
return f
def test_flavor_swap_get(self):
flavor_id = 1
response = self._do_get('flavors/%s' % flavor_id)
self.assertEqual(response.status, 200)
subs = {
'flavor_id': flavor_id,
'flavor_name': 'm1.tiny'
}
subs.update(self._get_regexes())
return self._verify_response('flavor-swap-get-resp', subs,
response)
def test_flavor_swap_list(self):
response = self._do_get('flavors/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('flavor-swap-list-resp', subs,
response)
def test_flavor_swap_create(self):
subs = {
'flavor_id': 100,
'flavor_name': 'flavortest'
}
response = self._do_post('flavors',
'flavor-swap-post-req',
subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('flavor-swap-post-resp',
subs, response)
class FlavorSwapXmlTest(FlavorSwapJsonTest):
ctype = 'xml'
class SecurityGroupsSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".security_groups.Security_groups"
def test_security_group_create(self):
name = self.ctype + '-test'
subs = {
'group_name': name,
"description": "description",
}
response = self._do_post('os-security-groups',
'security-group-post-req', subs)
self.assertEqual(response.status, 200)
self._verify_response('security-groups-create-resp', subs, response)
def test_security_groups_list(self):
# Get api sample of security groups get list request.
response = self._do_get('os-security-groups')
subs = self._get_regexes()
return self._verify_response('security-groups-list-get-resp',
subs, response)
def test_security_groups_get(self):
# Get api sample of security groups get request.
security_group_id = '1'
response = self._do_get('os-security-groups/%s' % security_group_id)
subs = self._get_regexes()
return self._verify_response('security-groups-get-resp',
subs, response)
def test_security_groups_list_server(self):
# Get api sample of security groups for a specific server.
uuid = self._post_server()
response = self._do_get('servers/%s/os-security-groups' % uuid)
subs = self._get_regexes()
return self._verify_response('server-security-groups-list-resp',
subs, response)
class SecurityGroupsSampleXmlTest(SecurityGroupsSampleJsonTest):
ctype = 'xml'
class SecurityGroupDefaultRulesSampleJsonTest(ServersSampleBase):
extension_name = ('nova.api.openstack.compute.contrib'
'.security_group_default_rules'
'.Security_group_default_rules')
def test_security_group_default_rules_create(self):
response = self._do_post('os-security-group-default-rules',
'security-group-default-rules-create-req',
{})
self.assertEqual(response.status, 200)
return self._verify_response(
'security-group-default-rules-create-resp', {}, response)
def test_security_group_default_rules_list(self):
self.test_security_group_default_rules_create()
response = self._do_get('os-security-group-default-rules')
return self._verify_response('security-group-default-rules-list-resp',
{}, response)
def test_security_group_default_rules_show(self):
self.test_security_group_default_rules_create()
rule_id = '1'
response = self._do_get('os-security-group-default-rules/%s' % rule_id)
return self._verify_response('security-group-default-rules-show-resp',
{}, response)
class SecurityGroupDefaultRulesSampleXmlTest(
SecurityGroupDefaultRulesSampleJsonTest):
ctype = 'xml'
class SchedulerHintsJsonTest(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.scheduler_hints."
"Scheduler_hints")
def test_scheduler_hints_post(self):
# Get api sample of scheduler hint post request.
hints = {'image_id': fake.get_valid_image_id(),
'image_near': str(uuid_lib.uuid4())
}
response = self._do_post('servers', 'scheduler-hints-post-req',
hints)
self.assertEqual(response.status, 202)
subs = self._get_regexes()
return self._verify_response('scheduler-hints-post-resp', subs,
response)
class SchedulerHintsXmlTest(SchedulerHintsJsonTest):
ctype = 'xml'
class ConsoleOutputSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".console_output.Console_output"
def test_get_console_output(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
'console-output-post-req',
{'action': 'os-getConsoleOutput'})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('console-output-post-resp',
subs, response)
class ConsoleOutputSampleXmlTest(ConsoleOutputSampleJsonTest):
ctype = 'xml'
class ExtendedServerAttributesJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".extended_server_attributes" + \
".Extended_server_attributes"
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['instance_name'] = r'instance-\d{8}'
subs['hypervisor_hostname'] = r'[\w\.\-]+'
return self._verify_response('server-get-resp',
subs, response)
def test_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['instance_name'] = r'instance-\d{8}'
subs['hypervisor_hostname'] = r'[\w\.\-]+'
return self._verify_response('servers-detail-resp',
subs, response)
class ExtendedServerAttributesXmlTest(ExtendedServerAttributesJsonTest):
ctype = 'xml'
class FloatingIpsJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib." \
"floating_ips.Floating_ips"
def setUp(self):
super(FloatingIpsJsonTest, self).setUp()
pool = CONF.default_floating_pool
interface = CONF.public_interface
self.ip_pool = [
{
'address': "10.10.10.1",
'pool': pool,
'interface': interface
},
{
'address': "10.10.10.2",
'pool': pool,
'interface': interface
},
{
'address': "10.10.10.3",
'pool': pool,
'interface': interface
},
]
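# Seed the floating IP pool directly in the database so the list and
# create requests below have addresses to work with.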
self.compute.db.floating_ip_bulk_create(
context.get_admin_context(), self.ip_pool)
def tearDown(self):
self.compute.db.floating_ip_bulk_destroy(
context.get_admin_context(), self.ip_pool)
super(FloatingIpsJsonTest, self).tearDown()
def test_floating_ips_list_empty(self):
response = self._do_get('os-floating-ips')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('floating-ips-list-empty-resp',
subs, response)
def test_floating_ips_list(self):
self._do_post('os-floating-ips',
'floating-ips-create-nopool-req',
{})
self._do_post('os-floating-ips',
'floating-ips-create-nopool-req',
{})
response = self._do_get('os-floating-ips')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('floating-ips-list-resp',
subs, response)
def test_floating_ips_create_nopool(self):
response = self._do_post('os-floating-ips',
'floating-ips-create-nopool-req',
{})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('floating-ips-create-resp',
subs, response)
def test_floating_ips_create(self):
response = self._do_post('os-floating-ips',
'floating-ips-create-req',
{"pool": CONF.default_floating_pool})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('floating-ips-create-resp',
subs, response)
def test_floating_ips_get(self):
self.test_floating_ips_create()
# NOTE(sdague): the first floating ip will always have 1 as an id,
# but it would be better if we could get this from the create
response = self._do_get('os-floating-ips/%d' % 1)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('floating-ips-create-resp',
subs, response)
def test_floating_ips_delete(self):
self.test_floating_ips_create()
response = self._do_delete('os-floating-ips/%d' % 1)
self.assertEqual(response.status, 202)
class FloatingIpsXmlTest(FloatingIpsJsonTest):
ctype = 'xml'
class FloatingIpsBulkJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib." \
"floating_ips_bulk.Floating_ips_bulk"
def setUp(self):
super(FloatingIpsBulkJsonTest, self).setUp()
pool = CONF.default_floating_pool
interface = CONF.public_interface
self.ip_pool = [
{
'address': "10.10.10.1",
'pool': pool,
'interface': interface
},
{
'address': "10.10.10.2",
'pool': pool,
'interface': interface
},
{
'address': "10.10.10.3",
'pool': pool,
'interface': interface,
'host': "testHost"
},
]
self.compute.db.floating_ip_bulk_create(
context.get_admin_context(), self.ip_pool)
def tearDown(self):
self.compute.db.floating_ip_bulk_destroy(
context.get_admin_context(), self.ip_pool)
super(FloatingIpsBulkJsonTest, self).tearDown()
def test_floating_ips_bulk_list(self):
response = self._do_get('os-floating-ips-bulk')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('floating-ips-bulk-list-resp', subs,
response)
def test_floating_ips_bulk_list_by_host(self):
response = self._do_get('os-floating-ips-bulk/testHost')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('floating-ips-bulk-list-by-host-resp',
subs, response)
def test_floating_ips_bulk_create(self):
response = self._do_post('os-floating-ips-bulk',
'floating-ips-bulk-create-req',
{"ip_range": "192.168.1.0/24",
"pool": CONF.default_floating_pool,
"interface": CONF.public_interface})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('floating-ips-bulk-create-resp', subs,
response)
def test_floating_ips_bulk_delete(self):
response = self._do_put('os-floating-ips-bulk/delete',
'floating-ips-bulk-delete-req',
{"ip_range": "192.168.1.0/24"})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('floating-ips-bulk-delete-resp', subs,
response)
class FloatingIpsBulkXmlTest(FloatingIpsBulkJsonTest):
ctype = 'xml'
class KeyPairsSampleJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.keypairs.Keypairs"
def generalize_subs(self, subs, vanilla_regexes):
subs['keypair_name'] = 'keypair-[0-9a-f-]+'
return subs
def test_keypairs_post(self, public_key=None):
"""Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid_lib.uuid4())
response = self._do_post('os-keypairs', 'keypairs-post-req',
{'keypair_name': key_name})
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self.assertEqual(response.status, 200)
self._verify_response('keypairs-post-resp', subs, response)
# NOTE(maurosr): returning the key_name is necessary because the
# verification returns the label of the last compared piece of
# information in the response, not necessarily the key name.
return key_name
def test_keypairs_import_key_post(self):
# Get api sample of key pairs post to import user's key.
key_name = 'keypair-' + str(uuid_lib.uuid4())
subs = {
'keypair_name': key_name,
'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
"B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
"RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
"9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
"pSxsIbECHw== Generated by Nova"
}
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
subs)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self.assertEqual(response.status, 200)
self._verify_response('keypairs-import-post-resp', subs, response)
def test_keypairs_get(self):
# Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs')
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
return self._verify_response('keypairs-get-resp', subs, response)
class KeyPairsSampleXmlTest(KeyPairsSampleJsonTest):
ctype = 'xml'
class RescueJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".rescue.Rescue")
def _rescue(self, uuid):
req_subs = {
'password': 'MySecretPass'
}
response = self._do_post('servers/%s/action' % uuid,
'server-rescue-req', req_subs)
self._verify_response('server-rescue', req_subs, response)
def _unrescue(self, uuid):
response = self._do_post('servers/%s/action' % uuid,
'server-unrescue-req', {})
self.assertEqual(response.status, 202)
def test_server_rescue(self):
uuid = self._post_server()
self._rescue(uuid)
# Do a server get to make sure that the 'RESCUE' state is set
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['status'] = 'RESCUE'
self._verify_response('server-get-resp-rescue', subs, response)
def test_server_unrescue(self):
uuid = self._post_server()
self._rescue(uuid)
self._unrescue(uuid)
# Do a server get to make sure that the 'ACTIVE' state is back
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['status'] = 'ACTIVE'
self._verify_response('server-get-resp-unrescue', subs, response)
class RescueXmlTest(RescueJsonTest):
ctype = 'xml'
class VirtualInterfacesJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".virtual_interfaces.Virtual_interfaces")
def test_vifs_list(self):
uuid = self._post_server()
response = self._do_get('servers/%s/os-virtual-interfaces' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
self._verify_response('vifs-list-resp', subs, response)
class VirtualInterfacesXmlTest(VirtualInterfacesJsonTest):
ctype = 'xml'
class CloudPipeSampleJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe"
def setUp(self):
super(CloudPipeSampleJsonTest, self).setUp()
def get_user_data(self, project_id):
"""Stub method to generate user data for cloudpipe tests."""
return "VVNFUiBEQVRB\n"
def network_api_get(self, context, network_uuid):
"""Stub to get a valid network and its information."""
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
self.stubs.Set(network_api.API, "get",
network_api_get)
def generalize_subs(self, subs, vanilla_regexes):
subs['project_id'] = 'cloudpipe-[0-9a-f-]+'
return subs
def test_cloud_pipe_create(self):
# Get api samples of cloud pipe extension creation.
self.flags(vpn_image_id=fake.get_valid_image_id())
project = {'project_id': 'cloudpipe-' + str(uuid_lib.uuid4())}
response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
project)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs.update(project)
subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-create-resp', subs, response)
return project
def test_cloud_pipe_list(self):
# Get api samples of cloud pipe extension get request.
project = self.test_cloud_pipe_create()
response = self._do_get('os-cloudpipe')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs.update(project)
subs['image_id'] = CONF.vpn_image_id
return self._verify_response('cloud-pipe-get-resp', subs, response)
class CloudPipeSampleXmlTest(CloudPipeSampleJsonTest):
ctype = "xml"
class CloudPipeUpdateJsonTest(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib"
".cloudpipe_update.Cloudpipe_update")
def _get_flags(self):
f = super(CloudPipeUpdateJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Cloudpipe_update also needs cloudpipe to be loaded
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe')
return f
def test_cloud_pipe_update(self):
subs = {'vpn_ip': '192.168.1.1',
'vpn_port': 2000}
response = self._do_put('os-cloudpipe/configure-project',
'cloud-pipe-update-req',
subs)
self.assertEqual(response.status, 202)
class CloudPipeUpdateXmlTest(CloudPipeUpdateJsonTest):
ctype = "xml"
class AgentsJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.agents.Agents"
def _get_flags(self):
f = super(AgentsJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
return f
def setUp(self):
super(AgentsJsonTest, self).setUp()
fake_agents_list = [{'url': 'xxxxxxxxxxxx',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
'version': '8.0',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': '1'}]
def fake_agent_build_create(context, values):
values['id'] = '1'
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
return agent_build_ref
def fake_agent_build_get_all(context, hypervisor):
agent_build_all = []
for agent in fake_agents_list:
if hypervisor and hypervisor != agent['hypervisor']:
continue
agent_build_ref = models.AgentBuild()
agent_build_ref.update(agent)
agent_build_all.append(agent_build_ref)
return agent_build_all
def fake_agent_build_update(context, agent_build_id, values):
pass
def fake_agent_build_destroy(context, agent_update_id):
pass
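# Wire the agent-build db API to the in-memory fakes defined above so
# these tests never touch a real database.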
self.stubs.Set(db, "agent_build_create",
fake_agent_build_create)
self.stubs.Set(db, "agent_build_get_all",
fake_agent_build_get_all)
self.stubs.Set(db, "agent_build_update",
fake_agent_build_update)
self.stubs.Set(db, "agent_build_destroy",
fake_agent_build_destroy)
def test_agent_create(self):
# Creates a new agent build.
project = {'url': 'xxxxxxxxxxxx',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
'version': '8.0',
'md5hash': 'add6bb58e139be103324d04d82d8f545'
}
response = self._do_post('os-agents', 'agent-post-req',
project)
self.assertEqual(response.status, 200)
project['agent_id'] = 1
self._verify_response('agent-post-resp', project, response)
return project
def test_agent_list(self):
# Return a list of all agent builds.
response = self._do_get('os-agents')
self.assertEqual(response.status, 200)
project = {'url': 'xxxxxxxxxxxx',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
'version': '8.0',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1
}
return self._verify_response('agents-get-resp', project, response)
def test_agent_update(self):
# Update an existing agent build.
agent_id = 1
subs = {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}
response = self._do_put('os-agents/%s' % agent_id,
'agent-update-put-req', subs)
self.assertEqual(response.status, 200)
subs['agent_id'] = 1
return self._verify_response('agent-update-put-resp', subs, response)
def test_agent_delete(self):
# Deletes an existing agent build.
agent_id = 1
response = self._do_delete('os-agents/%s' % agent_id)
self.assertEqual(response.status, 200)
class AgentsXmlTest(AgentsJsonTest):
ctype = "xml"
class FixedIpJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.fixed_ips.Fixed_ips"
def _get_flags(self):
f = super(FixedIpJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
return f
def setUp(self):
super(FixedIpJsonTest, self).setUp()
fake_fixed_ips = [{'id': 1,
'address': '192.168.1.1',
'network_id': 1,
'virtual_interface_id': 1,
'instance_uuid': '1',
'allocated': False,
'leased': False,
'reserved': False,
'host': None},
{'id': 2,
'address': '192.168.1.2',
'network_id': 1,
'virtual_interface_id': 2,
'instance_uuid': '2',
'allocated': False,
'leased': False,
'reserved': False,
'host': None},
]
def fake_fixed_ip_get_by_address(context, address):
for fixed_ip in fake_fixed_ips:
if fixed_ip['address'] == address:
return fixed_ip
raise exception.FixedIpNotFoundForAddress(address=address)
def fake_fixed_ip_get_by_address_detailed(context, address):
network = {'id': 1,
'cidr': "192.168.1.0/24"}
host = {'host': "host",
'hostname': 'openstack'}
for fixed_ip in fake_fixed_ips:
if fixed_ip['address'] == address:
return (fixed_ip, network, host)
raise exception.FixedIpNotFoundForAddress(address=address)
def fake_fixed_ip_update(context, address, values):
fixed_ip = fake_fixed_ip_get_by_address(context, address)
if fixed_ip is None:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
for key in values:
fixed_ip[key] = values[key]
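# Point the fixed-ip db lookups and updates at the fakes above; only
# 192.168.1.1 and 192.168.1.2 exist for these tests.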
self.stubs.Set(db, "fixed_ip_get_by_address",
fake_fixed_ip_get_by_address)
self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
fake_fixed_ip_get_by_address_detailed)
self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
def test_fixed_ip_reserve(self):
# Reserve a Fixed IP.
project = {'reserve': None}
response = self._do_post('os-fixed-ips/192.168.1.1/action',
'fixedip-post-req',
project)
self.assertEqual(response.status, 202)
def test_get_fixed_ip(self):
# Return data about the given fixed ip.
response = self._do_get('os-fixed-ips/192.168.1.1')
self.assertEqual(response.status, 200)
project = {'cidr': '192.168.1.0/24',
'hostname': 'openstack',
'host': 'host',
'address': '192.168.1.1'}
return self._verify_response('fixedips-get-resp', project, response)
class FixedIpXmlTest(FixedIpJsonTest):
ctype = "xml"
class AggregatesSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".aggregates.Aggregates"
def test_aggregate_create(self):
subs = {
"aggregate_id": '(?P<id>\d+)'
}
response = self._do_post('os-aggregates', 'aggregate-post-req', subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('aggregate-post-resp', subs, response)
def test_list_aggregates(self):
self.test_aggregate_create()
response = self._do_get('os-aggregates')
subs = self._get_regexes()
return self._verify_response('aggregates-list-get-resp',
subs, response)
def test_aggregate_get(self):
agg_id = self.test_aggregate_create()
response = self._do_get('os-aggregates/%s' % agg_id)
subs = self._get_regexes()
return self._verify_response('aggregates-get-resp', subs, response)
def test_add_metadata(self):
agg_id = self.test_aggregate_create()
response = self._do_post('os-aggregates/%s/action' % agg_id,
'aggregate-metadata-post-req',
{'action': 'set_metadata'})
subs = self._get_regexes()
return self._verify_response('aggregates-metadata-post-resp',
subs, response)
def test_add_host(self):
aggregate_id = self.test_aggregate_create()
subs = {
"host_name": self.compute.host,
}
response = self._do_post('os-aggregates/%s/action' % aggregate_id,
'aggregate-add-host-post-req', subs)
subs.update(self._get_regexes())
return self._verify_response('aggregates-add-host-post-resp',
subs, response)
def test_remove_host(self):
self.test_add_host()
subs = {
"host_name": self.compute.host,
}
response = self._do_post('os-aggregates/1/action',
'aggregate-remove-host-post-req', subs)
subs.update(self._get_regexes())
return self._verify_response('aggregates-remove-host-post-resp',
subs, response)
def test_update_aggregate(self):
aggregate_id = self.test_aggregate_create()
response = self._do_put('os-aggregates/%s' % aggregate_id,
'aggregate-update-post-req', {})
subs = self._get_regexes()
return self._verify_response('aggregate-update-post-resp',
subs, response)
class AggregatesSampleXmlTest(AggregatesSampleJsonTest):
ctype = 'xml'
class CertificatesSamplesJsonTest(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.certificates."
"Certificates")
def test_create_certificates(self):
response = self._do_post('os-certificates',
'certificate-create-req', {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('certificate-create-resp', subs, response)
def test_get_root_certificate(self):
response = self._do_get('os-certificates/root')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('certificate-get-root-resp', subs,
response)
class CertificatesSamplesXmlTest(CertificatesSamplesJsonTest):
ctype = 'xml'
class UsedLimitsSamplesJsonTest(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.used_limits."
"Used_limits")
def test_get_used_limits(self):
# Get api sample of used limits.
response = self._do_get('limits')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('usedlimits-get-resp', subs, response)
class UsedLimitsSamplesXmlTest(UsedLimitsSamplesJsonTest):
ctype = "xml"
class MultipleCreateJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.multiple_create."
"Multiple_create")
def test_multiple_create(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'min_count': "2",
'max_count': "3"
}
response = self._do_post('servers', 'multiple-create-post-req', subs)
self.assertEqual(response.status, 202)
subs.update(self._get_regexes())
return self._verify_response('multiple-create-post-resp',
subs, response)
def test_multiple_create_without_reservation_id(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'min_count': "2",
'max_count': "3"
}
response = self._do_post('servers', 'multiple-create-no-resv-post-req',
subs)
self.assertEqual(response.status, 202)
subs.update(self._get_regexes())
return self._verify_response('multiple-create-no-resv-post-resp',
subs, response)
class MultipleCreateXmlTest(MultipleCreateJsonTest):
ctype = 'xml'
class ServicesJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.services.Services"
def setUp(self):
super(ServicesJsonTest, self).setUp()
self.stubs.Set(db, "service_get_all",
test_services.fake_service_get_all)
self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
test_services.fake_service_get_by_host_binary)
self.stubs.Set(db, "service_update",
test_services.fake_service_update)
def tearDown(self):
super(ServicesJsonTest, self).tearDown()
timeutils.clear_time_override()
def test_services_list(self):
"""Return a list of all agent builds."""
response = self._do_get('os-services')
self.assertEqual(response.status, 200)
subs = {'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'status': 'disabled',
'state': 'up'}
subs.update(self._get_regexes())
return self._verify_response('services-list-get-resp',
subs, response)
def test_service_enable(self):
"""Enable an existing agent build."""
subs = {"host": "host1",
'service': 'nova-compute'}
response = self._do_put('/os-services/enable',
'service-enable-put-req', subs)
self.assertEqual(response.status, 200)
subs = {"host": "host1",
"service": "nova-compute"}
return self._verify_response('service-enable-put-resp',
subs, response)
def test_service_disable(self):
"""Disable an existing agent build."""
subs = {"host": "host1",
'service': 'nova-compute'}
response = self._do_put('/os-services/disable',
'service-disable-put-req', subs)
self.assertEqual(response.status, 200)
subs = {"host": "host1",
"service": "nova-compute"}
return self._verify_response('service-disable-put-resp',
subs, response)
class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.simple_tenant_usage."
"Simple_tenant_usage")
def setUp(self):
"""setUp method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).setUp()
self._post_server()
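# Advance the clock one hour so the usage window queried below spans
# the lifetime of the server just created.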
timeutils.set_time_override(timeutils.utcnow() +
datetime.timedelta(hours=1))
self.query = {
'start': str(timeutils.utcnow() - datetime.timedelta(hours=1)),
'end': str(timeutils.utcnow())
}
def tearDown(self):
"""tearDown method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).tearDown()
timeutils.clear_time_override()
def test_get_tenants_usage(self):
# Get api sample of the all-tenants usage request.
response = self._do_get('os-simple-tenant-usage?%s' % (
urllib.urlencode(self.query)))
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('simple-tenant-usage-get', subs, response)
def test_get_tenant_usage_details(self):
# Get api sample of a specific tenant's usage request.
tenant_id = 'openstack'
response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
urllib.urlencode(self.query)))
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('simple-tenant-usage-get-specific', subs,
response)
class SimpleTenantUsageSampleXmlTest(SimpleTenantUsageSampleJsonTest):
ctype = "xml"
class ServerDiagnosticsSamplesJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.server_diagnostics."
"Server_diagnostics")
def test_server_diagnostics_get(self):
uuid = self._post_server()
response = self._do_get('servers/%s/diagnostics' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('server-diagnostics-get-resp', subs,
response)
class ServerDiagnosticsSamplesXmlTest(ServerDiagnosticsSamplesJsonTest):
ctype = "xml"
class AvailabilityZoneJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.availability_zone."
"Availability_zone")
def test_create_availability_zone(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
"availability_zone": "nova"
}
response = self._do_post('servers', 'availability-zone-post-req', subs)
self.assertEqual(response.status, 202)
subs.update(self._get_regexes())
return self._verify_response('availability-zone-post-resp',
subs, response)
class AvailabilityZoneXmlTest(AvailabilityZoneJsonTest):
ctype = "xml"
class AdminActionsSamplesJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.admin_actions."
"Admin_actions")
def setUp(self):
"""setUp Method for AdminActions api samples extension
This method creates the server that will be used in each tests"""
super(AdminActionsSamplesJsonTest, self).setUp()
self.uuid = self._post_server()
def test_post_pause(self):
# Get api samples to pause server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-pause', {})
self.assertEqual(response.status, 202)
def test_post_unpause(self):
# Get api samples to unpause server request.
self.test_post_pause()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unpause', {})
self.assertEqual(response.status, 202)
def test_post_suspend(self):
# Get api samples to suspend server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-suspend', {})
self.assertEqual(response.status, 202)
def test_post_resume(self):
# Get api samples to server resume request.
self.test_post_suspend()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-resume', {})
self.assertEqual(response.status, 202)
def test_post_migrate(self):
# Get api samples to migrate server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-migrate', {})
self.assertEqual(response.status, 202)
def test_post_reset_network(self):
# Get api samples to reset server network request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-network', {})
self.assertEqual(response.status, 202)
def test_post_inject_network_info(self):
# Get api samples to inject network info request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-inject-network-info', {})
self.assertEqual(response.status, 202)
def test_post_lock_server(self):
# Get api samples to lock server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-lock-server', {})
self.assertEqual(response.status, 202)
def test_post_unlock_server(self):
# Get api samples to unlock server request.
self.test_post_lock_server()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unlock-server', {})
self.assertEqual(response.status, 202)
def test_post_backup_server(self):
# Get api samples to backup server request.
def image_details(self, context, **kwargs):
"""This stub is specifically used on the backup action."""
# NOTE(maurosr): I've added this simple stub because the backup action
# was trapped in an infinite loop during the fetch image phase, since
# the fake Image Service always returns the same set of images.
return None
self.stubs.Set(fake._FakeImageService, 'detail', image_details)
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-backup-server', {})
self.assertEqual(response.status, 202)
def test_post_live_migrate_server(self):
# Get api samples to server live migrate request.
def fake_live_migration_src_check(self, context, instance_ref):
"""Skip live migration scheduler checks."""
return
def fake_live_migration_dest_check(self, context, instance_ref, dest):
"""Skip live migration scheduler checks."""
return dest
def fake_live_migration_common(self, context, instance_ref, dest):
"""Skip live migration scheduler checks."""
return
self.stubs.Set(driver.Scheduler, '_live_migration_src_check',
fake_live_migration_src_check)
self.stubs.Set(driver.Scheduler, '_live_migration_dest_check',
fake_live_migration_dest_check)
self.stubs.Set(driver.Scheduler, '_live_migration_common_check',
fake_live_migration_common)
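# Fake the compute service lookup so the scheduler sees an enabled
# compute node on the requested target host.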
def fake_get_compute(context, host):
service = dict(host=host,
binary='nova-compute',
topic='compute',
report_count=1,
updated_at='foo',
hypervisor_type='bar',
hypervisor_version='1',
disabled=False)
return {'compute_node': [service]}
self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-live-migrate',
{'hostname': self.compute.host})
self.assertEqual(response.status, 202)
def test_post_reset_state(self):
# get api samples to server reset state request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-server-state', {})
self.assertEqual(response.status, 202)
class AdminActionsSamplesXmlTest(AdminActionsSamplesJsonTest):
ctype = 'xml'
class ConsolesSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".consoles.Consoles")
def setUp(self):
super(ConsolesSampleJsonTests, self).setUp()
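# Enable VNC and SPICE so the console actions below can return
# connection URLs.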
self.flags(vnc_enabled=True)
self.flags(enabled=True, group='spice')
def test_get_vnc_console(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
'get-vnc-console-post-req',
{'action': 'os-getVNCConsole'})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs["url"] = \
"((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
return self._verify_response('get-vnc-console-post-resp',
subs, response)
def test_get_spice_console(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
'get-spice-console-post-req',
{'action': 'os-getSPICEConsole'})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs["url"] = \
"((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
return self._verify_response('get-spice-console-post-resp',
subs, response)
class ConsolesSampleXmlTests(ConsolesSampleJsonTests):
ctype = 'xml'
class DeferredDeleteSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".deferred_delete.Deferred_delete")
def setUp(self):
super(DeferredDeleteSampleJsonTests, self).setUp()
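# With a non-zero reclaim interval DELETE only soft-deletes the server,
# which is what makes the restore and force-delete actions meaningful.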
self.flags(reclaim_instance_interval=1)
def test_restore(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'restore-post-req', {})
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), '')
def test_force_delete(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'force-delete-post-req', {})
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), '')
class DeferredDeleteSampleXmlTests(DeferredDeleteSampleJsonTests):
ctype = 'xml'
class QuotasSampleJsonTests(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
def test_show_quotas(self):
# Get api sample to show quotas.
response = self._do_get('os-quota-sets/fake_tenant')
self.assertEqual(response.status, 200)
return self._verify_response('quotas-show-get-resp', {}, response)
def test_show_quotas_defaults(self):
# Get api sample to show quotas defaults.
response = self._do_get('os-quota-sets/fake_tenant/defaults')
self.assertEqual(response.status, 200)
return self._verify_response('quotas-show-defaults-get-resp',
{}, response)
def test_update_quotas(self):
# Get api sample to update quotas.
response = self._do_put('os-quota-sets/fake_tenant',
'quotas-update-post-req',
{})
self.assertEqual(response.status, 200)
return self._verify_response('quotas-update-post-resp', {}, response)
class QuotasSampleXmlTests(QuotasSampleJsonTests):
ctype = "xml"
class ExtendedIpsSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".extended_ips.Extended_ips")
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['hypervisor_hostname'] = r'[\w\.\-]+'
return self._verify_response('server-get-resp', subs, response)
def test_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['id'] = uuid
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('servers-detail-resp', subs, response)
class ExtendedIpsSampleXmlTests(ExtendedIpsSampleJsonTests):
ctype = 'xml'
class ExtendedStatusSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".extended_status.Extended_status")
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('server-get-resp', subs, response)
def test_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['id'] = uuid
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('servers-detail-resp', subs, response)
class ExtendedStatusSampleXmlTests(ExtendedStatusSampleJsonTests):
ctype = 'xml'
class FlavorManageSampleJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.flavormanage."
"Flavormanage")
def _create_flavor(self):
"""Create a flavor."""
subs = {
'flavor_id': 10,
'flavor_name': "test_flavor"
}
response = self._do_post("flavors",
"flavor-create-post-req",
subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response("flavor-create-post-resp", subs, response)
def test_create_flavor(self):
# Get api sample to create a flavor.
self._create_flavor()
def test_delete_flavor(self):
# Get api sample to delete a flavor.
self._create_flavor()
response = self._do_delete("flavors/10")
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), '')
class FlavorManageSampleXmlTests(FlavorManageSampleJsonTests):
ctype = "xml"
class ServerPasswordSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.server_password."
"Server_password")
def test_get_password(self):
# Mock password since there is no api to set it
def fake_ext_password(*args, **kwargs):
return ("xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/"
"Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp"
"28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtV"
"VzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNX"
"JjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrj"
"QskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+Ac"
"X//PXk3uJ5kC7d67fPXaVz4WaQRYMg==")
self.stubs.Set(password, "extract_password", fake_ext_password)
uuid = self._post_server()
response = self._do_get('servers/%s/os-server-password' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['encrypted_password'] = fake_ext_password().replace('+', '\\+')
return self._verify_response('get-password-resp', subs, response)
def test_reset_password(self):
uuid = self._post_server()
response = self._do_delete('servers/%s/os-server-password' % uuid)
self.assertEqual(response.status, 204)
class ServerPasswordSampleXmlTests(ServerPasswordSampleJsonTests):
ctype = "xml"
class DiskConfigJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.disk_config."
"Disk_config")
def test_list_servers_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
return self._verify_response('list-servers-detail-get',
subs, response)
def test_get_server(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('server-get-resp', subs, response)
def test_update_server(self):
uuid = self._post_server()
response = self._do_put('servers/%s' % uuid,
'server-update-put-req', {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('server-update-put-resp',
subs, response)
def test_resize_server(self):
self.flags(allow_resize_to_same_host=True)
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
'server-resize-post-req', {})
self.assertEqual(response.status, 202)
# NOTE(tmello): Resize does not return response body
# Bug #1085213.
self.assertEqual(response.read(), "")
def test_rebuild_server(self):
uuid = self._post_server()
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
}
response = self._do_post('servers/%s/action' % uuid,
'server-action-rebuild-req', subs)
self.assertEqual(response.status, 202)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('server-action-rebuild-resp',
subs, response)
def test_get_image(self):
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s' % image_id)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['image_id'] = image_id
return self._verify_response('image-get-resp', subs, response)
def test_list_images(self):
response = self._do_get('images/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('image-list-resp', subs, response)
class DiskConfigXmlTest(DiskConfigJsonTest):
ctype = 'xml'
class OsNetworksJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.os_tenant_networks"
".Os_tenant_networks")
def setUp(self):
super(OsNetworksJsonTests, self).setUp()
CONF.set_override("enable_network_quota", True)
def fake(*args, **kwargs):
pass
self.stubs.Set(nova.quota.QUOTAS, "reserve", fake)
self.stubs.Set(nova.quota.QUOTAS, "commit", fake)
self.stubs.Set(nova.quota.QUOTAS, "rollback", fake)
self.stubs.Set(nova.quota.QuotaEngine, "reserve", fake)
self.stubs.Set(nova.quota.QuotaEngine, "commit", fake)
self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
def test_list_networks(self):
response = self._do_get('os-tenant-networks')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('networks-list-res', subs, response)
def test_create_network(self):
response = self._do_post('os-tenant-networks', "networks-post-req", {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('networks-post-res', subs, response)
def test_delete_network(self):
response = self._do_post('os-tenant-networks', "networks-post-req", {})
net = json.loads(response.read())
response = self._do_delete('os-tenant-networks/%s' %
net["network"]["id"])
self.assertEqual(response.status, 202)
class NetworksJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib"
".os_networks.Os_networks")
def setUp(self):
super(NetworksJsonTests, self).setUp()
fake_network_api = test_networks.FakeNetworkAPI()
self.stubs.Set(network_api.API, "get_all",
fake_network_api.get_all)
self.stubs.Set(network_api.API, "get",
fake_network_api.get)
self.stubs.Set(network_api.API, "associate",
fake_network_api.associate)
self.stubs.Set(network_api.API, "delete",
fake_network_api.delete)
self.stubs.Set(network_api.API, "create",
fake_network_api.create)
self.stubs.Set(network_api.API, "add_network_to_project",
fake_network_api.add_network_to_project)
def test_network_list(self):
response = self._do_get('os-networks')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('networks-list-resp', subs, response)
def test_network_disassociate(self):
uuid = test_networks.FAKE_NETWORKS[0]['uuid']
response = self._do_post('os-networks/%s/action' % uuid,
'networks-disassociate-req', {})
self.assertEqual(response.status, 202)
def test_network_show(self):
uuid = test_networks.FAKE_NETWORKS[0]['uuid']
response = self._do_get('os-networks/%s' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('network-show-resp', subs, response)
def test_network_create(self):
response = self._do_post("os-networks",
'network-create-req', {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('network-create-resp', subs, response)
def test_network_add(self):
response = self._do_post("os-networks/add",
'network-add-req', {})
self.assertEqual(response.status, 202)
class NetworksXmlTests(NetworksJsonTests):
ctype = 'xml'
class NetworksAssociateJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib"
".networks_associate.Networks_associate")
_sentinel = object()
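    # The sentinel lets the fake_associate stub below use defaults that can
    # never collide with a real host or project value, so "argument not
    # supplied" is distinguishable from an explicit None.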
def _get_flags(self):
f = super(NetworksAssociateJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # Networks_associate requires the Networks extension to be loaded
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.os_networks.Os_networks')
return f
def setUp(self):
super(NetworksAssociateJsonTests, self).setUp()
def fake_associate(self, context, network_id,
host=NetworksAssociateJsonTests._sentinel,
project=NetworksAssociateJsonTests._sentinel):
return True
self.stubs.Set(network_api.API, "associate", fake_associate)
def test_disassociate(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_host(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-host-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_project(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-project-req',
{})
self.assertEqual(response.status, 202)
def test_associate_host(self):
response = self._do_post('os-networks/1/action',
'network-associate-host-req',
{"host": "testHost"})
self.assertEqual(response.status, 202)
class NetworksAssociateXmlTests(NetworksAssociateJsonTests):
ctype = 'xml'
class FlavorDisabledSampleJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.flavor_disabled."
"Flavor_disabled")
def test_show_flavor(self):
# Get api sample to show flavor_disabled attr. of a flavor.
flavor_id = 1
response = self._do_get('flavors/%s' % flavor_id)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['flavor_id'] = flavor_id
return self._verify_response('flavor-show-get-resp', subs,
response)
def test_detail_flavor(self):
# Get api sample to show details of a flavor.
response = self._do_get('flavors/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('flavor-detail-get-resp', subs,
response)
class FlavorDisabledSampleXmlTests(FlavorDisabledSampleJsonTests):
ctype = "xml"
class QuotaClassesSampleJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.quota_classes."
"Quota_classes")
set_id = 'test_class'
def test_show_quota_classes(self):
# Get api sample to show quota classes.
response = self._do_get('os-quota-class-sets/%s' % self.set_id)
self.assertEqual(response.status, 200)
subs = {'set_id': self.set_id}
return self._verify_response('quota-classes-show-get-resp', subs,
response)
def test_update_quota_classes(self):
# Get api sample to update quota classes.
response = self._do_put('os-quota-class-sets/%s' % self.set_id,
'quota-classes-update-post-req',
{})
self.assertEqual(response.status, 200)
return self._verify_response('quota-classes-update-post-resp',
{}, response)
class QuotaClassesSampleXmlTests(QuotaClassesSampleJsonTests):
ctype = "xml"
class CellsSampleJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.cells.Cells"
def setUp(self):
# db_check_interval < 0 makes cells manager always hit the DB
self.flags(enable=True, db_check_interval=-1, group='cells')
super(CellsSampleJsonTest, self).setUp()
self._stub_cells()
def _stub_cells(self, num_cells=5):
self.cells = []
self.cells_next_id = 1
def _fake_cell_get_all(context):
return self.cells
def _fake_cell_get(context, cell_name):
for cell in self.cells:
if cell['name'] == cell_name:
return cell
raise exception.CellNotFound(cell_name=cell_name)
for x in xrange(num_cells):
cell = models.Cell()
our_id = self.cells_next_id
self.cells_next_id += 1
cell.update({'id': our_id,
'name': 'cell%s' % our_id,
'username': 'username%s' % our_id,
'is_parent': our_id % 2 == 0})
self.cells.append(cell)
self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
self.stubs.Set(db, 'cell_get', _fake_cell_get)
def test_cells_empty_list(self):
        # Override the default stubbing with an empty cell list
self._stub_cells(num_cells=0)
response = self._do_get('os-cells')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('cells-list-empty-resp', subs, response)
def test_cells_list(self):
response = self._do_get('os-cells')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('cells-list-resp', subs, response)
def test_cells_get(self):
response = self._do_get('os-cells/cell3')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('cells-get-resp', subs, response)
class CellsSampleXmlTest(CellsSampleJsonTest):
ctype = 'xml'
class BareMetalNodesJsonTest(ApiSampleTestBase, bm_db_base.BMDBTestCase):
extension_name = ('nova.api.openstack.compute.contrib.baremetal_nodes.'
'Baremetal_nodes')
def _create_node(self):
response = self._do_post("os-baremetal-nodes",
"baremetal-node-create-req",
{})
self.assertEqual(response.status, 200)
        subs = {'node_id': r'(?P<id>\d+)'}
return self._verify_response("baremetal-node-create-resp",
subs, response)
def test_create_node(self):
self._create_node()
def test_list_nodes(self):
node_id = self._create_node()
interface_id = self._add_interface(node_id)
response = self._do_get('os-baremetal-nodes')
self.assertEqual(response.status, 200)
subs = {'node_id': node_id,
'interface_id': interface_id,
'address': 'aa:aa:aa:aa:aa:aa',
}
return self._verify_response('baremetal-node-list-resp',
subs, response)
def test_show_node(self):
node_id = self._create_node()
interface_id = self._add_interface(node_id)
response = self._do_get('os-baremetal-nodes/%s' % node_id)
self.assertEqual(response.status, 200)
subs = {'node_id': node_id,
'interface_id': interface_id,
'address': 'aa:aa:aa:aa:aa:aa',
}
return self._verify_response('baremetal-node-show-resp',
subs, response)
def test_delete_node(self):
node_id = self._create_node()
response = self._do_delete("os-baremetal-nodes/%s" % node_id)
self.assertEqual(response.status, 202)
def _add_interface(self, node_id):
response = self._do_post("os-baremetal-nodes/%s/action" % node_id,
"baremetal-node-add-interface-req",
{'address': 'aa:aa:aa:aa:aa:aa'})
self.assertEqual(response.status, 200)
subs = {'interface_id': r'(?P<id>\d+)'}
return self._verify_response("baremetal-node-add-interface-resp",
subs, response)
def test_add_interface(self):
node_id = self._create_node()
self._add_interface(node_id)
def test_remove_interface(self):
node_id = self._create_node()
self._add_interface(node_id)
response = self._do_post("os-baremetal-nodes/%s/action" % node_id,
"baremetal-node-remove-interface-req",
{'address': 'aa:aa:aa:aa:aa:aa'})
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), "")
class BareMetalNodesXmlTest(BareMetalNodesJsonTest):
ctype = 'xml'
class FloatingIPPoolsSampleJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.floating_ip_pools."
"Floating_ip_pools")
def test_list_floatingippools(self):
pool_list = ["pool1", "pool2"]
def fake_get_floating_ip_pools(self, context):
return [{'name': pool_list[0]},
{'name': pool_list[1]}]
self.stubs.Set(network_api.API, "get_floating_ip_pools",
fake_get_floating_ip_pools)
response = self._do_get('os-floating-ip-pools')
self.assertEqual(response.status, 200)
subs = {
'pool1': pool_list[0],
'pool2': pool_list[1]
}
return self._verify_response('floatingippools-list-resp',
subs, response)
class FloatingIPPoolsSampleXmlTests(FloatingIPPoolsSampleJsonTests):
ctype = 'xml'
class MultinicSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib.multinic.Multinic"
def setUp(self):
super(MultinicSampleJsonTest, self).setUp()
self.uuid = self._post_server()
def test_add_fixed_ip(self):
subs = {"networkId": 1}
response = self._do_post('servers/%s/action' % (self.uuid),
'multinic-add-fixed-ip-req', subs)
self.assertEqual(response.status, 202)
def test_remove_fixed_ip(self):
subs = {"ip": "10.0.0.2"}
response = self._do_post('servers/%s/action' % (self.uuid),
'multinic-remove-fixed-ip-req', subs)
self.assertEqual(response.status, 202)
class MultinicSampleXmlTest(MultinicSampleJsonTest):
ctype = "xml"
class InstanceUsageAuditLogJsonTest(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib."
"instance_usage_audit_log.Instance_usage_audit_log")
def test_show_instance_usage_audit_log(self):
response = self._do_get('os-instance_usage_audit_log/%s' %
urllib.quote('2012-07-05 10:00:00'))
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('inst-usage-audit-log-show-get-resp',
subs, response)
def test_index_instance_usage_audit_log(self):
response = self._do_get('os-instance_usage_audit_log')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('inst-usage-audit-log-index-get-resp',
subs, response)
class InstanceUsageAuditLogXmlTest(InstanceUsageAuditLogJsonTest):
ctype = "xml"
class FlavorExtraSpecsSampleJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.flavorextraspecs."
"Flavorextraspecs")
def _flavor_extra_specs_create(self):
subs = {'value1': 'value1',
'value2': 'value2'
}
response = self._do_post('flavors/1/os-extra_specs',
'flavor-extra-specs-create-req', subs)
self.assertEqual(response.status, 200)
return self._verify_response('flavor-extra-specs-create-resp',
subs, response)
def test_flavor_extra_specs_get(self):
subs = {'value1': 'value1'}
self._flavor_extra_specs_create()
response = self._do_get('flavors/1/os-extra_specs/key1')
self.assertEqual(response.status, 200)
return self._verify_response('flavor-extra-specs-get-resp',
subs, response)
def test_flavor_extra_specs_list(self):
subs = {'value1': 'value1',
'value2': 'value2'
}
self._flavor_extra_specs_create()
response = self._do_get('flavors/1/os-extra_specs')
self.assertEqual(response.status, 200)
return self._verify_response('flavor-extra-specs-list-resp',
subs, response)
def test_flavor_extra_specs_create(self):
return self._flavor_extra_specs_create()
def test_flavor_extra_specs_update(self):
subs = {'value1': 'new_value1'}
self._flavor_extra_specs_create()
response = self._do_put('flavors/1/os-extra_specs/key1',
'flavor-extra-specs-update-req', subs)
self.assertEqual(response.status, 200)
return self._verify_response('flavor-extra-specs-update-resp',
subs, response)
def test_flavor_extra_specs_delete(self):
self._flavor_extra_specs_create()
response = self._do_delete('flavors/1/os-extra_specs/key1')
self.assertEqual(response.status, 200)
self.assertEqual(response.read(), '')
class FlavorExtraSpecsSampleXmlTests(FlavorExtraSpecsSampleJsonTests):
ctype = 'xml'
class FpingSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.fping.Fping")
def setUp(self):
super(FpingSampleJsonTests, self).setUp()
def fake_check_fping(self):
pass
self.stubs.Set(utils, "execute", test_fping.execute)
self.stubs.Set(fping.FpingController, "check_fping",
fake_check_fping)
def test_get_fping(self):
self._post_server()
response = self._do_get('os-fping')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('fping-get-resp', subs, response)
def test_get_fping_details(self):
uuid = self._post_server()
response = self._do_get('os-fping/%s' % (uuid))
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('fping-get-details-resp', subs, response)
class FpingSampleXmlTests(FpingSampleJsonTests):
ctype = 'xml'
class ExtendedAvailabilityZoneJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".extended_availability_zone"
".Extended_availability_zone")
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('server-get-resp', subs, response)
def test_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
return self._verify_response('servers-detail-resp', subs, response)
class ExtendedAvailabilityZoneXmlTests(ExtendedAvailabilityZoneJsonTests):
ctype = 'xml'
class EvacuateJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".evacuate.Evacuate")
def test_server_evacuate(self):
uuid = self._post_server()
req_subs = {
'host': 'TargetHost',
"adminPass": "MySecretPass",
"onSharedStorage": 'False'
}
def fake_service_is_up(self, service):
"""Simulate validation of instance host is down."""
return False
self.stubs.Set(service_group_api.API, 'service_is_up',
fake_service_is_up)
response = self._do_post('servers/%s/action' % uuid,
'server-evacuate-req', req_subs)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('server-evacuate-resp', subs,
response)
class EvacuateXmlTest(EvacuateJsonTest):
ctype = 'xml'
class FloatingIpDNSJsonTest(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.floating_ip_dns."
"Floating_ip_dns")
domain = 'domain1.example.org'
name = 'instance1'
scope = 'public'
project = 'project1'
dns_type = 'A'
ip = '192.168.1.1'
def _create_or_update(self):
subs = {'domain': self.domain,
'project': self.project,
'scope': self.scope}
response = self._do_put('os-floating-ip-dns/%s' % self.domain,
'floating-ip-dns-create-or-update-req', subs)
self.assertEqual(response.status, 200)
self._verify_response('floating-ip-dns-create-or-update-resp', subs,
response)
def _create_or_update_entry(self):
subs = {'ip': self.ip, 'dns_type': self.dns_type}
response = self._do_put('os-floating-ip-dns/%s/entries/%s'
% (self.domain, self.name),
'floating-ip-dns-create-or-update-entry-req',
subs)
self.assertEqual(response.status, 200)
subs.update({'name': self.name, 'domain': self.domain})
self._verify_response('floating-ip-dns-create-or-update-entry-resp',
subs, response)
def test_floating_ip_dns_list(self):
self._create_or_update()
response = self._do_get('os-floating-ip-dns')
self.assertEqual(response.status, 200)
subs = {'domain': self.domain,
'project': self.project,
'scope': self.scope}
return self._verify_response('floating-ip-dns-list-resp', subs,
response)
def test_floating_ip_dns_create_or_update(self):
self._create_or_update()
def test_floating_ip_dns_delete(self):
self._create_or_update()
response = self._do_delete('os-floating-ip-dns/%s' % self.domain)
self.assertEqual(response.status, 202)
def test_floating_ip_dns_create_or_update_entry(self):
self._create_or_update_entry()
def test_floating_ip_dns_entry_get(self):
self._create_or_update_entry()
response = self._do_get('os-floating-ip-dns/%s/entries/%s'
% (self.domain, self.name))
self.assertEqual(response.status, 200)
subs = {'domain': self.domain,
'ip': self.ip,
'name': self.name}
return self._verify_response('floating-ip-dns-entry-get-resp', subs,
response)
def test_floating_ip_dns_entry_delete(self):
self._create_or_update_entry()
response = self._do_delete('os-floating-ip-dns/%s/entries/%s'
% (self.domain, self.name))
self.assertEqual(response.status, 202)
def test_floating_ip_dns_entry_list(self):
self._create_or_update_entry()
response = self._do_get('os-floating-ip-dns/%s/entries/%s'
% (self.domain, self.ip))
self.assertEqual(response.status, 200)
subs = {'domain': self.domain,
'ip': self.ip,
'name': self.name}
return self._verify_response('floating-ip-dns-entry-list-resp', subs,
response)
class FloatingIpDNSXmlTest(FloatingIpDNSJsonTest):
ctype = 'xml'
class InstanceActionsSampleJsonTest(ApiSampleTestBase):
extension_name = ('nova.api.openstack.compute.contrib.instance_actions.'
'Instance_actions')
def setUp(self):
super(InstanceActionsSampleJsonTest, self).setUp()
self.actions = fake_instance_actions.FAKE_ACTIONS
self.events = fake_instance_actions.FAKE_EVENTS
self.instance = test_utils.get_test_instance()
def fake_instance_action_get_by_request_id(context, uuid, request_id):
return copy.deepcopy(self.actions[uuid][request_id])
def fake_instance_actions_get(context, uuid):
return [copy.deepcopy(value) for value in
self.actions[uuid].itervalues()]
def fake_instance_action_events_get(context, action_id):
return copy.deepcopy(self.events[action_id])
def fake_instance_get_by_uuid(context, instance_id):
return self.instance
def fake_get(self, context, instance_uuid):
return {'uuid': instance_uuid}
self.stubs.Set(db, 'action_get_by_request_id',
fake_instance_action_get_by_request_id)
self.stubs.Set(db, 'actions_get', fake_instance_actions_get)
self.stubs.Set(db, 'action_events_get',
fake_instance_action_events_get)
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
self.stubs.Set(compute_api.API, 'get', fake_get)
def test_instance_action_get(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_request_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_action = self.actions[fake_uuid][fake_request_id]
response = self._do_get('servers/%s/os-instance-actions/%s' %
(fake_uuid, fake_request_id))
subs = self._get_regexes()
subs['action'] = '(reboot)|(resize)'
subs['instance_uuid'] = fake_uuid
subs['integer_id'] = '[0-9]+'
subs['request_id'] = fake_action['request_id']
subs['start_time'] = fake_action['start_time']
subs['result'] = '(Success)|(Error)'
subs['event'] = '(schedule)|(compute_create)'
return self._verify_response('instance-action-get-resp', subs,
response)
def test_instance_actions_list(self):
fake_uuid = fake_instance_actions.FAKE_UUID
response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
subs = self._get_regexes()
subs['action'] = '(reboot)|(resize)'
subs['integer_id'] = '[0-9]+'
subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}')
return self._verify_response('instance-actions-list-resp', subs,
response)
class InstanceActionsSampleXmlTest(InstanceActionsSampleJsonTest):
ctype = 'xml'
class ImageSizeSampleJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib"
".image_size.Image_size")
def test_show(self):
# Get api sample of one single image details request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s' % image_id)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['image_id'] = image_id
return self._verify_response('image-get-resp', subs, response)
def test_detail(self):
# Get api sample of all images details request.
response = self._do_get('images/detail')
subs = self._get_regexes()
return self._verify_response('images-details-get-resp', subs, response)
class ImageSizeSampleXmlTests(ImageSizeSampleJsonTests):
ctype = 'xml'
class ConfigDriveSampleJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.config_drive."
"Config_drive")
def setUp(self):
super(ConfigDriveSampleJsonTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fake.stub_out_image_service(self.stubs)
def test_config_drive_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
        # config drive can be a uuid or an empty value
subs['cdrive'] = '(%s)?' % subs['uuid']
return self._verify_response('server-config-drive-get-resp', subs,
response)
def test_config_drive_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
        # config drive can be a uuid or an empty value
subs['cdrive'] = '(%s)?' % subs['uuid']
return self._verify_response('servers-config-drive-details-resp',
subs, response)
class ConfigDriveSampleXmlTest(ConfigDriveSampleJsonTest):
ctype = 'xml'
class FlavorAccessSampleJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.flavor_access."
"Flavor_access")
def _get_flags(self):
f = super(FlavorAccessSampleJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# FlavorAccess extension also needs Flavormanage to be loaded.
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
return f
def _add_tenant(self):
subs = {
'tenant_id': 'fake_tenant',
'flavor_id': 10
}
response = self._do_post('flavors/10/action',
'flavor-access-add-tenant-req',
subs)
self.assertEqual(response.status, 200)
return self._verify_response('flavor-access-add-tenant-resp',
subs, response)
def _create_flavor(self):
subs = {
'flavor_id': 10,
'flavor_name': 'test_flavor'
}
response = self._do_post("flavors",
"flavor-access-create-req",
subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response("flavor-access-create-resp",
subs, response)
def test_flavor_access_create(self):
self._create_flavor()
def test_flavor_access_detail(self):
response = self._do_get('flavors/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('flavor-access-detail-resp',
subs, response)
def test_flavor_access_list(self):
self._create_flavor()
self._add_tenant()
flavor_id = 10
response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
self.assertEqual(response.status, 200)
subs = {
'flavor_id': flavor_id,
'tenant_id': 'fake_tenant',
}
return self._verify_response('flavor-access-list-resp',
subs, response)
def test_flavor_access_show(self):
flavor_id = 1
response = self._do_get('flavors/%s' % flavor_id)
self.assertEqual(response.status, 200)
subs = {
'flavor_id': flavor_id
}
subs.update(self._get_regexes())
return self._verify_response('flavor-access-show-resp',
subs, response)
def test_flavor_access_add_tenant(self):
self._create_flavor()
        self._add_tenant()
def test_flavor_access_remove_tenant(self):
self._create_flavor()
self._add_tenant()
subs = {
'tenant_id': 'fake_tenant',
}
response = self._do_post('flavors/10/action',
"flavor-access-remove-tenant-req",
subs)
self.assertEqual(response.status, 200)
return self._verify_response('flavor-access-remove-tenant-resp',
{}, response)
class FlavorAccessSampleXmlTests(FlavorAccessSampleJsonTests):
ctype = 'xml'
class HypervisorsSampleJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
"Hypervisors")
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors')
self.assertEqual(response.status, 200)
return self._verify_response('hypervisors-list-resp',
{}, response)
def test_hypervisors_search(self):
response = self._do_get('os-hypervisors/fake/search')
self.assertEqual(response.status, 200)
return self._verify_response('hypervisors-search-resp',
{}, response)
def test_hypervisors_servers(self):
response = self._do_get('os-hypervisors/fake/servers')
self.assertEqual(response.status, 200)
return self._verify_response('hypervisors-servers-resp',
{}, response)
def test_hypervisors_show(self):
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('hypervisors-show-resp',
subs, response)
def test_hypervisors_statistics(self):
response = self._do_get('os-hypervisors/statistics')
self.assertEqual(response.status, 200)
return self._verify_response('hypervisors-statistics-resp',
{}, response)
def test_hypervisors_uptime(self):
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.stubs.Set(compute_api.HostAPI,
'get_host_uptime', fake_get_host_uptime)
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
self.assertEqual(response.status, 200)
subs = {
'hypervisor_id': hypervisor_id,
}
return self._verify_response('hypervisors-uptime-resp',
subs, response)
class HypervisorsSampleXmlTests(HypervisorsSampleJsonTests):
ctype = "xml"
class AttachInterfacesSampleJsonTest(ServersSampleBase):
extension_name = ('nova.api.openstack.compute.contrib.attach_interfaces.'
'Attach_interfaces')
def setUp(self):
super(AttachInterfacesSampleJsonTest, self).setUp()
def fake_list_ports(self, *args, **kwargs):
uuid = kwargs.get('device_id', None)
if not uuid:
raise exception.InstanceNotFound(instance_id=None)
port_data = {
"id": "ce531f90-199f-48c0-816c-13e38010b442",
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "fa:16:3e:4c:2c:30",
"fixed_ips": [
{
"ip_address": "192.168.1.3",
"subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
}
],
"device_id": uuid,
}
ports = {'ports': [port_data]}
return ports
def fake_show_port(self, context, port_id=None):
if not port_id:
raise exception.PortNotFound(port_id=None)
port_data = {
"id": port_id,
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "fa:16:3e:4c:2c:30",
"fixed_ips": [
{
"ip_address": "192.168.1.3",
"subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
}
],
"device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
}
port = {'port': port_data}
return port
def fake_attach_interface(self, context, instance,
network_id, port_id,
requested_ip='192.168.1.3'):
if not network_id:
network_id = "fake_net_uuid"
if not port_id:
port_id = "fake_port_uuid"
network_info = [
{
'bridge': 'br-100',
'id': network_id,
'cidr': '192.168.1.0/24',
'vlan': '101',
'injected': 'False',
'multi_host': 'False',
'bridge_interface': 'bridge_interface'
},
{
"vif_uuid": port_id,
"network_id": network_id,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "fa:16:3e:4c:2c:30",
"fixed_ips": [
{
"ip_address": requested_ip,
"subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
}
],
"device_id": instance['uuid'],
}
]
return network_info
def fake_detach_interface(self, context, instance, port_id):
pass
self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
self.stubs.Set(network_api.API, 'show_port', fake_show_port)
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
self.flags(quantum_auth_strategy=None)
self.flags(quantum_url='http://anyhost/')
self.flags(quantum_url_timeout=30)
def generalize_subs(self, subs, vanilla_regexes):
subs['subnet_id'] = vanilla_regexes['uuid']
subs['net_id'] = vanilla_regexes['uuid']
subs['port_id'] = vanilla_regexes['uuid']
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
subs['ip_address'] = vanilla_regexes['ip']
return subs
def test_list_interfaces(self):
instance_uuid = self._post_server()
response = self._do_get('servers/%s/os-interface' % instance_uuid)
self.assertEqual(response.status, 200)
subs = {
'ip_address': '192.168.1.3',
'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
'mac_addr': 'fa:16:3e:4c:2c:30',
'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
'port_state': 'ACTIVE'
}
self._verify_response('attach-interfaces-list-resp', subs, response)
def _stub_show_for_instance(self, instance_uuid, port_id):
show_port = network_api.API().show_port(None, port_id)
show_port['port']['device_id'] = instance_uuid
self.stubs.Set(network_api.API, 'show_port', lambda *a, **k: show_port)
def test_show_interfaces(self):
instance_uuid = self._post_server()
port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
self._stub_show_for_instance(instance_uuid, port_id)
response = self._do_get('servers/%s/os-interface/%s' %
(instance_uuid, port_id))
self.assertEqual(response.status, 200)
subs = {
'ip_address': '192.168.1.3',
'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
'mac_addr': 'fa:16:3e:4c:2c:30',
'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
'port_id': port_id,
'port_state': 'ACTIVE'
}
self._verify_response('attach-interfaces-show-resp', subs, response)
def test_create_interfaces(self, instance_uuid=None):
if instance_uuid is None:
instance_uuid = self._post_server()
subs = {
'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
'ip_address': '192.168.1.3',
'port_state': 'ACTIVE',
'mac_addr': 'fa:16:3e:4c:2c:30',
}
self._stub_show_for_instance(instance_uuid, subs['port_id'])
response = self._do_post('servers/%s/os-interface' % instance_uuid,
'attach-interfaces-create-req', subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
self._verify_response('attach-interfaces-create-resp',
subs, response)
def test_delete_interfaces(self):
instance_uuid = self._post_server()
port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
response = self._do_delete('servers/%s/os-interface/%s' %
(instance_uuid, port_id))
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), '')
class AttachInterfacesSampleXmlTest(AttachInterfacesSampleJsonTest):
ctype = 'xml'
class SnapshotsSampleJsonTests(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.volumes.Volumes"
create_subs = {
'snapshot_name': 'snap-001',
'description': 'Daily backup',
'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c'
}
def setUp(self):
super(SnapshotsSampleJsonTests, self).setUp()
self.stubs.Set(cinder.API, "get_all_snapshots",
fakes.stub_snapshot_get_all)
self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
def _create_snapshot(self):
self.stubs.Set(cinder.API, "create_snapshot",
fakes.stub_snapshot_create)
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
response = self._do_post("os-snapshots",
"snapshot-create-req",
self.create_subs)
return response
def test_snapshots_create(self):
response = self._create_snapshot()
self.assertEqual(response.status, 200)
self.create_subs.update(self._get_regexes())
return self._verify_response("snapshot-create-resp",
self.create_subs, response)
def test_snapshots_delete(self):
self.stubs.Set(cinder.API, "delete_snapshot",
fakes.stub_snapshot_delete)
self._create_snapshot()
response = self._do_delete('os-snapshots/100')
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), '')
def test_snapshots_detail(self):
response = self._do_get('os-snapshots/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('snapshots-detail-resp',
subs, response)
def test_snapshots_list(self):
response = self._do_get('os-snapshots')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('snapshots-list-resp',
subs, response)
def test_snapshots_show(self):
response = self._do_get('os-snapshots/100')
self.assertEqual(response.status, 200)
subs = {
'snapshot_name': 'Default name',
'description': 'Default description'
}
subs.update(self._get_regexes())
return self._verify_response('snapshots-show-resp',
subs, response)
class SnapshotsSampleXmlTests(SnapshotsSampleJsonTests):
ctype = "xml"
class VolumeAttachmentsSampleJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
def test_attach_volume_to_server(self):
device_name = '/dev/vdd'
self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
self.stubs.Set(compute_manager.ComputeManager,
"reserve_block_device_name",
lambda *a, **k: device_name)
volume = fakes.stub_volume_get(None, context.get_admin_context(),
'a26887c6-c47b-4654-abb5-dfadf7d3f803')
subs = {
'volume_id': volume['id'],
'device': device_name
}
server_id = self._post_server()
response = self._do_post('servers/%s/os-volume_attachments'
% server_id,
'attach-volume-to-server-req', subs)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
self._verify_response('attach-volume-to-server-resp',
subs, response)
def _stub_compute_api_get_instance_bdms(self, server_id):
def fake_compute_api_get_instance_bdms(self, context, instance):
bdms = [
{'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
'instance_uuid': server_id,
'device_name': '/dev/sdd'},
{'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804',
'instance_uuid': server_id,
'device_name': '/dev/sdc'}
]
return bdms
self.stubs.Set(compute_api.API, "get_instance_bdms",
fake_compute_api_get_instance_bdms)
def _stub_compute_api_get(self):
def fake_compute_api_get(self, context, instance_id):
return {'uuid': instance_id}
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
def test_list_volume_attachments(self):
server_id = self._post_server()
self._stub_compute_api_get_instance_bdms(server_id)
response = self._do_get('servers/%s/os-volume_attachments'
% server_id)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('list-volume-attachments-resp',
subs, response)
def test_volume_attachment_detail(self):
server_id = self._post_server()
attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
self._stub_compute_api_get_instance_bdms(server_id)
self._stub_compute_api_get()
response = self._do_get('servers/%s/os-volume_attachments/%s'
% (server_id, attach_id))
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('volume-attachment-detail-resp',
subs, response)
def test_volume_attachment_delete(self):
server_id = self._post_server()
attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
self._stub_compute_api_get_instance_bdms(server_id)
self._stub_compute_api_get()
self.stubs.Set(compute_api.API, 'detach_volume', lambda *a, **k: None)
response = self._do_delete('servers/%s/os-volume_attachments/%s'
% (server_id, attach_id))
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), '')
class VolumeAttachmentsSampleXmlTest(VolumeAttachmentsSampleJsonTest):
ctype = 'xml'
class VolumesSampleJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
def _get_volume_id(self):
return 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
def _stub_volume(self, id, displayname="Volume Name",
displaydesc="Volume Description", size=100):
volume = {
'id': id,
'size': size,
'availability_zone': 'zone1:host1',
'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
'mountpoint': '/',
'status': 'in-use',
'attach_status': 'attached',
'name': 'vol name',
'display_name': displayname,
'display_description': displaydesc,
'created_at': "2008-12-01T11:01:55",
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'Backup'}
}
return volume
def _stub_volume_get(self, context, volume_id):
return self._stub_volume(volume_id)
def _stub_volume_delete(self, context, *args, **param):
pass
def _stub_volume_get_all(self, context, search_opts=None):
id = self._get_volume_id()
return [self._stub_volume(id)]
def _stub_volume_create(self, context, size, name, description, snapshot,
**param):
id = self._get_volume_id()
return self._stub_volume(id)
def setUp(self):
super(VolumesSampleJsonTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(cinder.API, "delete", self._stub_volume_delete)
self.stubs.Set(cinder.API, "get", self._stub_volume_get)
self.stubs.Set(cinder.API, "get_all", self._stub_volume_get_all)
def _post_volume(self):
subs_req = {
'volume_name': "Volume Name",
'volume_desc': "Volume Description",
}
self.stubs.Set(cinder.API, "create", self._stub_volume_create)
response = self._do_post('os-volumes', 'os-volumes-post-req',
subs_req)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs.update(subs_req)
return self._verify_response('os-volumes-post-resp', subs, response)
def test_volumes_show(self):
subs = {
'volume_name': "Volume Name",
'volume_desc': "Volume Description",
}
vol_id = self._get_volume_id()
response = self._do_get('os-volumes/%s' % vol_id)
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('os-volumes-get-resp', subs, response)
def test_volumes_index(self):
subs = {
'volume_name': "Volume Name",
'volume_desc': "Volume Description",
}
response = self._do_get('os-volumes')
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('os-volumes-index-resp', subs, response)
def test_volumes_detail(self):
# For now, index and detail are the same.
# See the volumes api
subs = {
'volume_name': "Volume Name",
'volume_desc': "Volume Description",
}
response = self._do_get('os-volumes/detail')
self.assertEqual(response.status, 200)
subs.update(self._get_regexes())
return self._verify_response('os-volumes-detail-resp', subs, response)
def test_volumes_create(self):
return self._post_volume()
def test_volumes_delete(self):
self._post_volume()
vol_id = self._get_volume_id()
response = self._do_delete('os-volumes/%s' % vol_id)
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), '')
class VolumesSampleXmlTest(VolumesSampleJsonTest):
ctype = 'xml'
| apache-2.0 | -4,991,101,234,518,709,000 | 37.464727 | 79 | 0.565975 | false |
brucedjones/pyck | examples/stl_test.py | 1 | 2459 | import pyck
domain = [100.0, 100.0, 500.0]
h = domain[0] / 100
smoothingKernelFunc = 2
speedsound = 1
density = 1
shearmodulus = 1
bulkmodulus = 1
# Create a packer, see packers directory for options
Hcp = pyck.HcpPacker(domain, h)
pack = pyck.StructuredPack(Hcp)
tibia_min = [154.2328, 204.0634, -853.9525] # Read from the tibia STL file
tibia_max = [226.9384, 268.8318, -468.3400]
tibia_len = [tibia_max[0] - tibia_min[0], tibia_max[1] -
tibia_min[1], tibia_max[2] - tibia_min[2]]
tibia_center = [tibia_min[0] + tibia_len[0] / 2, tibia_min[1] +
tibia_len[1] / 2, tibia_min[2] + tibia_len[2] / 2]
tibia_scale = 1
tibia_len[0] = tibia_len[0] * tibia_scale
tibia_len[1] = tibia_len[1] * tibia_scale
tibia_len[2] = tibia_len[2] * tibia_scale
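# Recenter the mesh: translate it so the center of its bounding box coincides
# with the center of the packing domain.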
tibia_dcenter = [domain[0] / 2, domain[1] / 2, domain[2] / 2]
tibia_translation = [tibia_dcenter[0] - tibia_center[0],
tibia_dcenter[1] - tibia_center[1], tibia_dcenter[2] - tibia_center[2]]
stlShape = pyck.StlShape(1, 'tibia_low.stl', tibia_translation)
#cube = pyck.Cuboid(2,[0.2,0.2,0.2],[0.6,0.6,0.6]);
# Map the shapes and generate the pack
pack.AddShape(stlShape)
# pack.AddShape(cube);
pack.Process()
# Create a new model from the pack
model = pyck.Model(pack)
# Create a new field of n-dimensional integers
# Arguments are CreateIntField(label,dimensions)
# label - label for this field in the vtp file
# dimensions - dimensionality of this field, doesn't have to correspond to model dimensions
# Create field of doubles in the same way with CreateDoubleField
stateField = model.CreateIntField("State", 1)
# Arguments are SetIntField(field,tag,value(s))
# field - returned from CreateIntField
# tag - tag applied to particles during shape Mapping
# value(s) - a single value or array of values [v1, v2,...vn] assigned to
# particles with a matching tag
model.SetIntField(stateField, 1, 10)
model.SetIntField(stateField, 2, 20)
# Overwrite some parameters
# Arguments are SetParameter(Label,Value)
model.SetParameter("MaxSteps", "100")
model.SetParameter("Mass", "0.5")
# Or use a python dictionary to overwrite parameters
parameters = pyck.Parameters({'ViscAlpha': '0.1', 'ViscBeta': '0.2'})
model.SetParameters(parameters)
# Create a file writer, in this case VTP according to spark format
writer = pyck.SparkWriter()
# Write the VTP file
model.Serialize("tibia.vtp", writer)
| mit | -7,106,577,362,135,716,000 | 32.633803 | 92 | 0.681578 | false |
Bam4d/neon | neon/backends/backend.py | 1 | 87983 | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Defines Tensor and Backend class
"""
import numpy as np
import logging
from math import ceil
logger = logging.getLogger(__name__)
class OpCollection(object):
"""
A collection of the set of operation strings
"""
zero_operand_ops = {"rand", "onehot"}
unary_ops = {"finite", "neg", "abs", "sgn", "sqrt", "sqr", "exp", "log",
"exp2", "log2", "sig", "sig2", "tanh", "tanh2", "transpose",
"safelog"}
binary_ops = {"assign", "add", "sub", "mul", "div", "eq", "ne", "lt", "le",
"gt", "ge", "pow", "minimum", "maximum", "dot"}
reduction_ops = {"sum", "max", "min", "argmax", "argmin"}
float_ops = zero_operand_ops | unary_ops | binary_ops
ew_ops = float_ops - {'dot', 'transpose'}
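    # For example, "dot" is a binary op but not an elementwise op:
    #   "dot" in OpCollection.binary_ops  -> True
    #   "dot" in OpCollection.ew_ops      -> False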
class Tensor(object):
"""
    The n-dimensional array data structure. GPUTensor and CPUTensor inherit
    Tensor. Depending on the backend, a subclass may accept additional keyword
    arguments. All non-keyword arguments shall be in exactly the same order as
    in Tensor.
Arguments:
backend (Backend): backend of the tensor.
shape (tuple, optional): shape of the tensor.
dtype (numpy.ndtype, optional): underlying data type of the elements.
        name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor will
persist across multiple begin and
end calls. Setting to False may
provide a performance increase if
values do not need to be maintained
across such calls
See also:
        GPUTensor class, CPUTensor class
Notes:
Unlike numpy, in this implementation we never collapse dimensions, and
the minimal number of dimensions will be _min_dims (currently set to
2). So a wrapped scalar will have dimension 1x1.
"""
def __init__(self,
backend,
shape=None,
dtype=np.float32,
name=None,
persist_values=True):
self.backend = backend
self.shape = shape
self.dtype = dtype
self.name = name
self.persist_values = persist_values
self._min_dims = 2
def __str__(self):
"""
Returns a string representation of this Tensor.
Returns:
str: the representation.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def __repr__(self):
"""
Returns a more unambiguous string representation of the Tensor.
Returns:
str: the string representation.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def __len__(self):
"""
Return the size of the leading dimension of self.
Returns:
int: the size of the leading dimension.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def __setitem__(self, index, value):
"""
Assign the specified value to a subset of elements found via slice
style indexing along each dimension. e.g. A[5:10, :] = 4.5.
Each slice consists of start_idx:stop_idx:step_size triplets. If
step_size isn't specified it defaults to 1. If start_idx isn't
specified it defaults to 0. If stop_idx isn't specified it defaults
to the total number of elements along that dimension. As such a slice
value of ':' allows one to select all elements along that dimension.
Arguments:
index (int, slice, tuple): indices of each dimension's slice.
value (numeric array, Tensor): values to be assigned to the
extracted element subset. If an
array it should be the same shape
as what key indexes (or be
broadcastable as such).
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def __getitem__(self, index):
"""
Extract a subset view of the items via slice style indexing
along each dimension. e.g. A[5:10, :]. Each slice consists of
start_idx:stop_idx:step_size triplets. If step_size isn't specified it
defaults to 1. If start_idx isn't specified it defaults to 0. If
stop_idx isn't specified it defaults to the total number of elements
along that dimension. As such a slice value of ':' allows one to
select all elements along that dimension.
Arguments:
index (int, slice, tuple): indices of each dimension's slice.
Returns:
Tensor: view of self corresponding to the subset items.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def _assign(self, value):
"""
        Assign an input value to the Tensor. The NervanaCPU backend clips
        int and uint types when overflow happens.
        Arguments:
            value (Tensor, OpTreeNode, numeric): the value to be assigned.
"""
raise NotImplementedError()
def set(self, ary):
"""
Copy host array to the tensor.
Arguments:
ary (numpy.ndarray): host array, needs to be contiguous
Returns:
Tensor: self
"""
raise NotImplementedError()
def get(self):
"""
Copy tensor to host as numpy array.
Returns:
numpy.ndarray: A host numpy array
"""
raise NotImplementedError()
def asnumpyarray(self):
"""
Convert the tensor to an in host memory `numpy.ndarray`. A copy of the
data may be made depending on where the Tensor normally resides.
Returns:
numpy.ndarray view or copy of the Tensor data.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def take(self, indices, axis, out=None):
"""
Select a subset of elements from an array across an axis
Arguments:
            indices (Tensor, numpy ndarray): indices of elements to select
axis (int): axis across which to select the values
out (Tensor, numpy ndarray, optional): place the resultant values
into this array if
specified.
Return:
Tensor: Tensor with selected values
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def fill(self, value):
"""
Assign specified value to each element of this Tensor.
Arguments:
value (numeric): The value to be assigned to each element.
Return:
Tensor: updated view of the data.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def copy(self, a):
"""
Construct and return a deep copy of the Tensor passed.
Arguments:
a (Tensor): the object to copy
Returns:
Tensor: new array object with the same values as tsr.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def copy_from(self, a):
"""
Copy contents from `a`.
Arguments:
a (numpy.ndarray): the host-resident object to copy from
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def reshape(self, *shape):
"""
Adjusts the dimensions of the data to the specified shape. The number
of elements represented by the new shape must be the same as before.
Arguments:
shape (int, list): new length of each dimension
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
@property
def T(self):
"""
Return a transposed view of the data.
Returns:
Tensor: transposed view of self.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def transpose(self, out=None):
"""
Return a transposed view of the data. Alias of .T property needed for
MOP compatibility.
Arguments:
out (Tensor, numpy ndarray, optional): place the resultant values
into this array if
specified.
Returns:
Tensor: transposed view of self.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def hist(self, tag):
"""
Compute a histogram of the current tensor values.
Arguments:
tag (string): Tag to identify the current state of the tensor,
useful for disambiguating multiple histograms of the
same tensor at different points in time.
Returns:
Tensor containing the histogram data.
Raises:
NotImplementedError: Can't be instantiated directly.
"""
raise NotImplementedError()
def __add__(self, other):
"""
Perform `add` operations.
Arguments:
other: the right-hand side operand
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("add", self, other)
def __sub__(self, other):
return OpTreeNode.build("sub", self, other)
def __mul__(self, other):
return OpTreeNode.build("mul", self, other)
def __div__(self, other):
return OpTreeNode.build("div", self, other)
def __truediv__(self, other):
return OpTreeNode.build("div", self, other)
def __pow__(self, other):
return OpTreeNode.build("pow", self, other)
def __radd__(self, other):
return OpTreeNode.build("add", other, self)
def __rsub__(self, other):
return OpTreeNode.build("sub", other, self)
def __rmul__(self, other):
return OpTreeNode.build("mul", other, self)
def __rdiv__(self, other):
return OpTreeNode.build("div", other, self)
def __rtruediv__(self, other):
return OpTreeNode.build("div", other, self)
def __rpow__(self, other):
return OpTreeNode.build("pow", other, self)
def __eq__(self, other):
return OpTreeNode.build("eq", self, other)
def __ne__(self, other):
return OpTreeNode.build("ne", self, other)
def __lt__(self, other):
return OpTreeNode.build("lt", self, other)
def __le__(self, other):
return OpTreeNode.build("le", self, other)
def __gt__(self, other):
return OpTreeNode.build("gt", self, other)
def __ge__(self, other):
return OpTreeNode.build("ge", self, other)
def __abs__(self):
return OpTreeNode.build("abs", self, None)
def __neg__(self):
return OpTreeNode.build("neg", self, None)
class Backend(object):
"""
Backend interface used to manipulate Tensor data. This abstract base class
defines what operations each concrete backend must support.
NervanaGPU and NervanaCPU inherit Backend.
Arguments:
rng_seed (int, optional): random number generator seed value
default_dtype (numpy.ndtype, optional): Elemental data type to use when
creating new tensors if not
otherwise specified. Defaults
to np.float32
compat_mode (str, optional): Flag to match implementation of other
libraries. Currently only 'caffe' is
supported, defaults to None.
"""
def __init__(self, rng_seed=None, default_dtype=np.float32,
compat_mode=None):
# dtype
self.default_dtype = default_dtype
# use RandomState instead of seed
self.rng = np.random.RandomState(rng_seed)
self.init_rng_state = self.rng.get_state() # for resetting state
# batch size
self.bsz = None
self._min_dims = 2
if compat_mode is not None:
if compat_mode == 'caffe':
self.set_caffe_compat()
else:
raise ValueError('%s mode not supported currently' % compat_mode)
else:
self.compat_mode = None
def output_dim(self, X, S, padding, strides, pooling=False):
"""
        Compute the output size along one dimension, given the input size,
        filter size, padding, and stride.
Arguments:
X (int): input data dimension
S (int): filter dimension
padding (int): padding on each side
strides (int): striding
pooling (bool): flag for setting pooling layer size
"""
if self.check_caffe_compat() and pooling:
size = int(ceil(float(X - S + 2 * padding)/strides)) + 1
if padding > 0 and (size - 1)*strides >= X + padding:
# decrement size if last pooling op is completely in padding
size -= 1
else:
# normal neon output size determination
size = (X - S + 2 * padding) // strides + 1
return size
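# Illustrative example (added annotation, not part of the original API): with
# caffe compatibility off, a 32-wide input, a 3-wide filter, padding 1 and
# stride 1 keep the size unchanged:
#   output_dim(X=32, S=3, padding=1, strides=1) -> (32 - 3 + 2*1)//1 + 1 == 32
# while stride 2 roughly halves it: (32 - 3 + 2*1)//2 + 1 == 16.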
def set_caffe_compat(self):
"""
Set flag to make layers compatible with caffe in terms of conv and pool
layer output size determination and dropout layer implementation
"""
self.compat_mode = 'caffe'
def check_caffe_compat(self):
return self.compat_mode == 'caffe'
def iobuf(self, dim0, x=None, dtype=None, name=None, persist_values=True,
shared=None, parallelism=None):
"""
Allocate input and output buffer for layer based on batch size. This
is used because the layer does not know about the batch size.
Arguments:
dim0 (tuple or int): I/O buffer dimension for layer (without the
axis specifying the batch size).
x (data-type, optional): If present and not None, `x` will be
returned directly. `x` will not be None if
the buffer has already been allocated.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor will
persist across multiple begin and
end calls. Setting to False may
provide a performance increase if
values do not need to be maintained
across such calls
shared (buffer, optional): If present will attempt to reuse the memory
in shared to allocate the I/O buffer
parallelism (str, optional): Indicates type of parallelism (Data,
Model) employed by this buffer.
Ignored on CPU and GPU backends,
defaults to no parallelism.
Returns:
Tensor: array object
"""
if x is not None:
return x
if isinstance(dim0, tuple):
if (len(dim0) == 2):
bufshape = (dim0[0], dim0[1] * self.bsz)
else:
bufshape = (np.prod(dim0), self.bsz)
else:
bufshape = (dim0, self.bsz)
if shared is not None:
if shared.shape == bufshape:
return shared
else:
return shared.share(bufshape)
else:
return self.zeros(bufshape, dtype=dtype, name=name,
persist_values=persist_values)
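# Usage sketch (hypothetical; assumes a concrete backend `be` with be.bsz == 128):
#   be.iobuf(100)          # -> zero-filled buffer of shape (100, 128)
#   be.iobuf((3, 32, 32))  # -> shape (3*32*32, 128) == (3072, 128)
#   be.iobuf((10, 5))      # -> shape (10, 5*128) == (10, 640)
# Passing an already-allocated `x` returns it unchanged, and a compatible
# `shared` buffer is reused instead of allocating new memory.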
def rng_reset(self):
"""
Reset the random state to the state in which the Backend was first
initialized.
Implementations typically call: self.rng.set_state(self.init_rng_state)
"""
raise NotImplementedError()
def execute(self, node):
"""
Execute the optree. There must be one and only one 'assign' op at the
top of the optree when execute is called.
Arguments:
node (OpTreeNode): The op-tree to execute.
"""
pass
def begin(self, block, identifier):
"""
Signal the start of a block of repeated computation (ex. at the start
of a loop). This operation can be used to help the compiler optimize
instruction performance, but has no direct effect on calculations.
It must be book-ended by a corresponding Backend.end() call.
Note that multiple begin calls can appear adjacent in nested loops.
Arguments:
block (Block.attr): identifies the type of computation being worked
on based on Block attribute specified
identifier (int): unique identifier for this particular iteration
of the block. Will typically be something like
epoch number, mini-batch number, and so forth.
See Also:
:py:func:`~neon.backends.backend.Backend.end`,
"""
pass
def end(self, block, identifier):
"""
Signal the corresponding end of a block of repeated computation
(ex. at the end of a loop). This operation can be used to help the
compiler optimize performance, but has no direct effect on
calculations. It must be preceded by a corresponding Backend.begin()
call.
Arguments:
block (Block.attr): identifies the type of computation being worked
on based on Block attribute specified
identifier (int): unique identifier for this particular iteration
of the block. Will typically be something like
epoch number, mini-batch number, and so forth.
See Also:
:py:func:`~neon.backends.backend.Backend.begin`,
"""
pass
def empty(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, without
initializing element values. This is slightly faster than
:py:func:`~neon.backends.Backend.array`,
:py:func:`~neon.backends.Backend.ones`,
:py:func:`~neon.backends.Backend.zeros`, but the memory is left
uninitialized, so the element values are arbitrary.
Arguments:
shape (int, list): length of each dimension of the Tensor.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.array`,
:py:func:`~neon.backends.Backend.zeros`,
:py:func:`~neon.backends.Backend.ones`
"""
raise NotImplementedError()
def array(self, ary, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, populating
elements based on ary values.
Arguments:
ary (array_like): input array object to construct from. Can be
built-in python scalar or list (of lists), or a
numpy.ndarray
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.empty`,
:py:func:`~neon.backends.Backend.zeros`,
:py:func:`~neon.backends.Backend.ones`
"""
raise NotImplementedError()
def zeros(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, populating
each element with a value of 0.
Arguments:
shape (int, list): length of each dimension of the Tensor.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.empty`,
:py:func:`~neon.backends.Backend.ones`,
:py:func:`~neon.backends.Backend.array`
"""
raise NotImplementedError()
def ones(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, populating
each element with a value of 1.
Arguments:
shape (int, list): length of each dimension of the Tensor.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.zeros`,
:py:func:`~neon.backends.backend.Backend.array`
"""
raise NotImplementedError()
def empty_like(self, other_ary, name=None, persist_values=True):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from other_ary.
Arguments:
other_ary (tensor object): Tensor to inherit the dimensions of.
name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.empty`,
:py:func:`~neon.backends.Backend.ones`,
:py:func:`~neon.backends.Backend.array`
"""
raise NotImplementedError()
def zeros_like(self, other_ary, name=None, persist_values=True):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from other_ary and populating each element with a value of 0.
Arguments:
other_ary (tensor object): Tensor to inherit the dimensions of.
name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.empty`,
:py:func:`~neon.backends.Backend.ones`,
:py:func:`~neon.backends.Backend.array`
"""
raise NotImplementedError()
def dot(self, a, b, out=None):
"""
Dot product of two Tensors.
Arguments:
a (Tensor): left-hand side operand.
b (Tensor): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Note that this object should differ from
left and right.
Returns:
OpTreeNode: the resulting op-tree from this operation.
"""
return OpTreeNode.build("dot", a, b, out=out)
def compound_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False):
"""
Perform one of the following operations (* is dot product)
C = alpha * A * B + beta * C
C = alpha * A.T * B + beta * C
C = alpha * A * B.T + beta * C
relu: if true, applied before output (and prior to beta addition)
The operation will be short-circuited to: C <- alpha * A * B
if beta has value 0 (the default).
Arguments:
A (Tensor): left-hand side operand.
B (Tensor): right-hand side operand.
C (Tensor): output operand
alpha (float, optional): scale A*B term
beta (float, optional): scale C term before sum
relu (bool, optional): If True apply ReLu non-linearity before
output. Defaults to False.
"""
raise NotImplementedError()
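# Semantics sketch (pseudocode; concrete backends fuse this into a single kernel):
#   C[:] = alpha * (A dot B) + beta * C   # optionally followed by a ReLU
# With the default beta == 0 this reduces to C[:] = alpha * (A dot B).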
def batched_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False):
"""
Perform one of the following operations:
1. For fprop: A(K, C), B(X,C,N), C(X,K,N) --> call batched_dot(A, B, C)
2. For bprop: A(K, C), B(X,K,N), C(X,C,N) --> call batched_dot(A.T, B, C)
3. For update: A(X,K,N), B(X,C,N), C(K,C) --> call batched_dot(A, B.T, C)
Arguments:
A (Tensor): left-hand input operand
B (Tensor): right-hand input operand
C (Tensor): output operand
alpha (float, optional): scale A*B term
beta (float, optional): scale C term before sum
relu (bool, optional): If True apply ReLu non-linearity before
output. Defaults to False.
"""
raise NotImplementedError()
def make_binary_mask(self, out, keepthresh=0.5):
"""
Create a binary mask for dropout layers.
Arguments:
out (Tensor): Output tensor
keepthresh (float, optional): fraction of ones. Defaults to 0.5
"""
raise NotImplementedError()
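# Typical dropout usage (hypothetical sketch, not from the original source):
#   be.make_binary_mask(mask, keepthresh=0.8)  # roughly 80% ones, 20% zeros
#   out[:] = inputs * mask                     # zero out the dropped activations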
def add(self, a, b, out=None):
"""
Perform element-wise addition on the operands, storing the resultant
values in the out Tensor. Each operand and out must have identical
shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("add", a, b, out=out)
def subtract(self, a, b, out=None):
"""
Perform element-wise subtraction on the operands, storing the resultant
values in the out Tensor. Each operand and out must have identical
shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sub", a, b, out=out)
def multiply(self, a, b, out=None):
"""
Perform element-wise multiplication on the operands, storing the
resultant values in the out Tensor. Each operand and out must have
identical shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("mul", a, b, out=out)
def divide(self, a, b, out=None):
"""
Perform element-wise division on the operands, storing the
resultant values in the out Tensor. Each operand and out must have
identical shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("div", a, b, out=out)
def true_divide(self, a, b, out=None):
"""
Alias of divide. Unlike Python's traditional 'floor division', this
returns a true division.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("div", a, b, out=out)
def power(self, a, b, out=None):
"""
Perform element-wise raise of Tensor `a` values to the specified power,
storing the result in Tensor out. Both Tensors should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
b (Tensor, numeric): exponentiated value to be applied to
element. Examples include 2 (square),
0.5 (square root).
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("pow", a, b, out=out)
def reciprocal(self, a, out=None):
"""
Perform element-wise reciprocal of Tensor `a`, storing the result in
Tensor out. Both Tensors should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("div", 1., a, out=out)
def negative(self, a, out=None):
"""
Perform element-wise negation of Tensor `a`, storing the result in
Tensor out. Both Tensors should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("neg", a, None, out=out)
def sgn(self, a, out=None):
"""
Perform element-wise indication of the sign of Tensor `a`, storing the
result in Tensor out. Both Tensors should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sgn", a, None, out=out)
def absolute(self, a, out=None):
"""
Perform element-wise absolute value of Tensor `a`, storing the result in
Tensor out. Both Tensors should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("abs", a, None, out=out)
def fabs(self, a, out=None):
"""
Perform element-wise absolute value of Tensor `a`, storing the result
in Tensor out. Both Tensors should have identical shape. Implemented as
an alias of absolute.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("abs", a, None, out=out)
def sqrt(self, a, out=None):
"""
Perform element-wise square-root of Tensor `a`, storing the result in
Tensor out. Both Tensors should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sqrt", a, None, out=out)
def square(self, a, out=None):
"""
Perform element-wise square of Tensor `a`, storing the result in Tensor
out. Both Tensors should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sqr", a, None, out=out)
def exp(self, a, out=None):
"""
Perform element-wise exponential transformation on Tensor `a`, storing
the result in Tensor out. Both Tensors should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("exp", a, None, out=out)
def exp2(self, a, out=None):
"""
Perform element-wise 2-based exponential transformation on Tensor `a`,
storing the result in Tensor out. Both Tensors should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("exp2", a, None, out=out)
def safelog(self, a, out=None):
"""
Perform element-wise natural logarithm transformation on Tensor `a`,
storing the result in Tensor out. Both Tensors should have identical
shape. This log function has built-in safety for underflow.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("safelog", a, None, out=out)
def log(self, a, out=None):
"""
Perform element-wise natural logarithm transformation on Tensor `a`,
storing the result in Tensor out. Both Tensors should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("log", a, None, out=out)
def log2(self, a, out=None):
"""
Perform element-wise 2-based logarithm transformation on Tensor `a`,
storing the result in Tensor out. Both Tensors should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("log2", a, None, out=out)
def sig(self, a, out=None):
"""
Perform element-wise sigmoid transformation on Tensor `a`,
storing the result in Tensor out. Both Tensors should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sig", a, None, out=out)
def sig2(self, a, out=None):
"""
Perform element-wise 2-based sigmoid transformation on
Tensor `a`, storing the result in Tensor out. Both Tensors should
have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sig2", a, None, out=out)
def tanh(self, a, out=None):
"""
Perform element-wise hyperbolic tangent transformation on Tensor `a`,
storing the result in Tensor out. Both Tensors should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("tanh", a, None, out=out)
def tanh2(self, a, out=None):
"""
Perform element-wise 2-based hyperbolic tangent transformation on Tensor
`a`, storing the result in Tensor out. Both Tensors should have
identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("tanh2", a, None, out=out)
def finite(self, a, out=None):
"""
Perform element-wise test of finiteness (neither infinity nor NaN)
on Tensor `a`, storing the result in Tensor out. Both Tensors
should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("finite", a, None, out=out)
def equal(self, a, b, out=None):
"""
Performs element-wise equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("eq", a, b, out=out)
def not_equal(self, a, b, out=None):
"""
Performs element-wise non-equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("ne", a, b, out=out)
def less(self, a, b, out=None):
"""
Performs element-wise less than testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("lt", a, b, out=out)
def less_equal(self, a, b, out=None):
"""
Performs element-wise less than or equal testing on each element of
left and right, storing the result in out. Each operand is assumed to
be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("le", a, b, out=out)
def greater(self, a, b, out=None):
"""
Performs element-wise greater than testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("gt", a, b, out=out)
def greater_equal(self, a, b, out=None):
"""
Performs element-wise greater than or equal testing on each element of
left and right, storing the result in out. Each operand is assumed to
be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("ge", a, b, out=out)
def maximum(self, a, b, out=None):
"""
Performs element-wise maximum value assignment based on corresponding
elements of left and right, storing the result in out. Each operand is
assumed to be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("maximum", a, b, out=out)
def minimum(self, a, b, out=None):
"""
Performs element-wise minimum value assignment based on corresponding
elements of left and right, storing the result in out. Each operand is
assumed to be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("minimum", a, b, out=out)
def clip(self, a, a_min, a_max, out=None):
"""
Performs element-wise clipping of Tensor `a`, storing the result in out.
The clipped value will be between [a_min, a_max].
Arguments:
a (Tensor, numeric): input tensor to be clipped.
a_min (Tensor, numeric): lower bound of the clipped range.
a_max (Tensor, numeric): upper bound of the clipped range.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return self.minimum(self.maximum(a, a_min), a_max, out=out)
def sum(self, a, axis=None, out=None, keepdims=True):
"""
Calculates the summation of the elements along the specified axis.
Arguments:
a (Tensor): the Tensor on which to perform the sum
axis (int, optional): the dimension along which to compute.
If set to None, we will sum over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
if axis is None:
return OpTreeNode.build("sum", OpTreeNode.build("sum", a, None, axis=0),
None, axis=1, out=out)
return OpTreeNode.build("sum", a, None, axis=axis, out=out)
def max(self, a, axis=None, out=None, keepdims=True):
"""
Calculates the maximal element value along the specified axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take max over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
if axis is None:
return OpTreeNode.build("max", OpTreeNode.build("max", a, None, axis=0),
None, axis=1, out=out)
return OpTreeNode.build("max", a, None, axis=axis, out=out)
def min(self, a, axis=None, out=None, keepdims=True):
"""
Calculates the minimal element value along the specified axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take min over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
if axis is None:
return OpTreeNode.build("min", OpTreeNode.build("min", a, None, axis=0),
None, axis=1, out=out)
return OpTreeNode.build("min", a, None, axis=axis, out=out)
def argmax(self, a, axis=1, out=None, keepdims=True):
"""
Calculates the indices of the maximal element value along the specified
axis. If multiple elements contain the maximum, only the indices of
the first are returned.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take argmax over all
dimensions. Defaults to 1
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("argmax", a, None, axis=axis, out=out)
def argmin(self, a, axis=1, out=None, keepdims=True):
"""
Calculates the indices of the minimal element value along the specified
axis. If multiple elements contain the minimum, only the indices of
the first are returned.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take argmin over all
dimensions. Defaults to 1
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("argmin", a, None, axis=axis, out=out)
def mean(self, a, axis=None, partial=None, out=None, keepdims=True):
"""
Calculates the arithmetic mean of the elements along the specified
axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take mean over all
dimensions. Defaults to None
partial (bool, optional): Not currently used.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
shape = a.shape
if axis is None:
return self.multiply(self.sum(a), 1.0 / (shape[0] * shape[1]), out=out)
return self.multiply(self.sum(a, axis=axis), 1.0 / shape[axis], out=out)
def var(self, a, axis=None, partial=None, out=None, keepdims=True):
"""
Calculates the variance of the elements along the specified
axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take var over all
dimensions. Defaults to None
partial (bool, optional): Not currently used.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
if axis is None:
return self.mean(self.square(a - self.mean(a)), out=out)
return self.mean(self.square(a - self.mean(a, axis=axis)), axis=axis, out=out)
def std(self, a, axis=None, partial=None, out=None, keepdims=True):
"""
Calculates the standard deviation of the elements along the specified
axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take std over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
partial (bool, optional): Not currently used.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
return self.sqrt(self.var(a, axis=axis, partial=partial, out=out))
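# These moment helpers are simple compositions of the primitives above, e.g.
# (illustrative, for a 2D tensor `x`):
#   mean(x, axis=1) == sum(x, axis=1) * (1.0 / x.shape[1])
#   var(x, axis=1)  == mean(square(x - mean(x, axis=1)), axis=1)
#   std(x, axis=1)  == sqrt(var(x, axis=1))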
def take(self, a, indices, axis, out=None):
"""
Extract elements based on the indices along a given axis.
Arguments:
a (Tensor): the Tensor on which to perform the operation
indices (Tensor, numpy ndarray): indices of elements to select
axis (int, optional): the dimension along which to compute.
If set to None, we will extract over all
dimensions (flattened first)
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
"""
return a.take(indices, axis, out)
def onehot(self, indices, axis, out=None):
"""
Generate optree for converting `indices` to a onehot representation
Arguments:
indices (Tensor): Elements must be of numpy integer type for gpu
onehot to work.
axis (int): the axis along the feature length dimension
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
if axis not in (0, 1):
raise ValueError("bad axis for onehot")
return OpTreeNode.build("onehot", None, None, idx=indices, axis=axis, out=out)
def update_fc_bias(self, err, out):
"""
Compute the updated bias gradient for a fully connected network layer.
Arguments:
err (Tensor): backpropagated error
out (Tensor): Where to store the updated gradient value.
"""
self.ng.sum(err, axis=1, out=out)
def add_fc_bias(self, inputs, bias):
"""
Add the bias for a fully connected network layer.
Arguments:
inputs (Tensor): the input to update.
bias (Tensor): the amount to increment
"""
self.ng.add(inputs, bias, out=inputs)
def conv_layer(self, dtype,
N, C, K,
D=1, H=1, W=1,
T=1, R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1,
relu=False, bsum=False, deterministic_update=False):
"""
Create a new ConvLayer parameter object.
This is then passed as an argument to all the convolution operations.
Arguments:
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
N (int): Number of images in mini-batch
C (int): Number of input feature maps
K (int): Number of output feature maps
D (int, optional): Depth of input image. Defaults to 1
H (int, optional): Height of input image. Defaults to 1
W (int, optional): Width of input image. Defaults to 1
T (int, optional): Depth of filter kernel. Defaults to 1
R (int, optional): Height of filter kernel. Defaults to 1
S (int, optional): Width of filter kernel. Defaults to 1
pad_d (int, optional): amount of zero-padding around the depth edge
Defaults to 0.
pad_h (int, optional): amount of zero-padding around the height edge
Defaults to 0.
pad_w (int, optional): amount of zero-padding around the width edge
Defaults to 0.
str_d (int, optional): factor to step the filters by in the depth
direction. Defaults to 1
str_h (int, optional): factor to step the filters by in the height
direction. Defaults to 1
str_w (int, optional): factor to step the filters by in the width
direction. Defaults to 1
relu (bool, optional): apply a relu transform to the output for
fprop or bprop. Defaults to False
bsum (bool, optional): calculate the sum along the batchnorm axis
for fprop or bprop. Outputs an fp32 tensor
of size Kx1. Defaults to False.
deterministic_update (bool, optional): eliminate atomic adds in the
update operation. Increases
reproducibility but runs
slower. Defaults to False.
"""
raise NotImplementedError()
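# Hypothetical call (concrete backends implement this): a 3x3 2D convolution
# over 64-channel 32x32 inputs, batch size 128, 256 output feature maps:
#   layer = be.conv_layer(np.float32, N=128, C=64, K=256,
#                         H=32, W=32, R=3, S=3, pad_h=1, pad_w=1)
# The returned parameter object is then passed to fprop_conv / bprop_conv /
# update_conv.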
def fprop_conv(self, layer, I, F, O, alpha=1.0, relu=False, repeat=1):
"""
Forward propagate the inputs of a convolutional network layer to
produce output
Arguments:
layer: the conv layer as a parameter object
I (Tensor): inputs
F (Tensor): the weights (filters)
O (Tensor): outputs
alpha (float, optional): linear scaling. Defaults to 1.0
relu (bool, optional): apply ReLu before output. Default not to.
repeat (int, optional): Repeat this operation the specified number
of times. Defaults to 1.
"""
raise NotImplementedError()
def bprop_conv(self, layer, F, E, grad_I, alpha=1.0, repeat=1):
"""
Backward propagate the error through a convolutional network layer.
Arguments:
layer: the conv layer as a parameter object
F (Tensor): the weights (filters)
E (Tensor): errors
grad_I (Tensor): gradient to inputs (output delta)
alpha (float, optional): linear scaling. Defaults to 1.0
repeat (int, optional): Repeat this operation the specified number
of times. Defaults to 1.
"""
raise NotImplementedError()
def update_conv(self, layer, I, E, grad_F, alpha=1.0, repeat=1):
"""
Compute the updated gradient for a convolutional network layer.
Arguments:
layer: the conv layer as a parameter object
I (Tensor): the inputs
E (Tensor): the errors
grad_F (Tensor): filter gradients (weights) to update.
alpha (float, optional): linear scaling. Defaults to 1.0
repeat (int, optional): Repeat this operation the specified number
of times. Defaults to 1.
"""
raise NotImplementedError()
def deconv_layer(self, dtype,
N, C, K,
P, Q,
R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1):
"""
Create a new Deconvolution parameter object.
This then is passed as an argument to all deconvolution kernels.
Arguments:
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
N (int): Number of images in mini-batch
C (int): Number of input feature maps
K (int): Number of output feature maps
P (int): Height of output
Q (int): Width of output
R (int, optional): Height of filter kernel. Defaults to 1
S (int, optional): Width of filter kernel. Defaults to 1
pad_d (int, optional): amount of zero-padding around the depth edge
Defaults to 0.
pad_h (int, optional): amount of zero-padding around the height edge
Defaults to 0.
pad_w (int, optional): amount of zero-padding around the width edge
Defaults to 0.
str_d (int, optional): factor to step the filters by in the depth
direction. Defaults to 1
str_h (int, optional): factor to step the filters by in the height
direction. Defaults to 1
str_w (int, optional): factor to step the filters by in the width
direction. Defaults to 1
Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.
"""
raise NotImplementedError()
def pool_layer(self, dtype,
op, N, C,
D=1, H=1, W=1,
J=1, T=1, R=1, S=1,
pad_j=0, pad_d=0, pad_h=0, pad_w=0,
str_j=None, str_d=None, str_h=None, str_w=None):
"""
Create a new PoolLayer parameter object.
This then is passed as an argument to all pooling kernels.
Arguments:
op (str): "max", "avg", "l2" pooling (currently bprop only supports
max, but not avg and l2)
N (int): Number of images in mini-batch
C (int): Number of input feature maps
D (int, optional): Depth of input image. Defaults to 1
H (int, optional): Height of input image. Defaults to 1
W (int, optional): Width of input image. Defaults to 1
J (int, optional): Size of feature map pooling window
(maxout n_pieces). Defaults to 1
T (int, optional): Depth of pooling window. Defaults to 1
R (int, optional): Height of pooling window. Defaults to 1
S (int, optional): Width of pooling window. Defaults to 1
pad_j (int, optional): amount of zero-padding around the fm pooling
window edge. Defaults to 0.
pad_d (int, optional): amount of zero-padding around the depth edge
Defaults to 0.
pad_h (int, optional): amount of zero-padding around the height edge
Defaults to 0.
pad_w (int, optional): amount of zero-padding around the width edge
Defaults to 0.
str_j (int, optional): factor to step the filters by in the fm
pooling window direction. Defaults to 1
str_d (int, optional): factor to step the filters by in the depth
direction. Defaults to 1
str_h (int, optional): factor to step the filters by in the height
direction. Defaults to 1
str_w (int, optional): factor to step the filters by in the width
direction. Defaults to 1
Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.
"""
raise NotImplementedError()
def fprop_pool(self, layer, I, O):
"""
Forward propagate pooling layer.
Arguments:
layer (PoolLayer): The pool layer object, different backends have
different pool layers.
I (Tensor): Input tensor.
O (Tensor): output tensor.
"""
raise NotImplementedError()
def bprop_pool(self, layer, I, E, grad_I):
"""
Backward propagate pooling layer.
Arguments:
layer (PoolLayer): The pool layer object. Different backends have
different pool layers.
I (Tensor): Input tensor.
E (Tensor): Error tensor.
grad_I (Tensor): Gradient tensor (delta)
"""
raise NotImplementedError()
def compound_bprop_lut(self, nin, inputs, error, error_t, dW, pad_idx, alpha=1.0, beta=0):
"""
Backward propagate lookup table layer.
Arguments:
nin (integer): Number of input word_ids.
inputs (Tensor): Input tensor.
error (Tensor): Error tensor.
error_t (Tensor): Transposed error tensor.
dW (Tensor): Gradient tensor (delta).
pad_idx (integer):
alpha (float):
beta (float):
"""
raise NotImplementedError()
# For constructing an op tree used in lazy evaluation
class OpTreeNode(tuple):
"""
An OpTreeNode is a tuple of length 3. The first element is a dict
specifying the operation, and the second and third elements specify the
operands. From an op-tree's tree perspective, think about the 3
elements as 3 nodes. The second and third element are the left and right
child of the first element.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __str__(self):
s = '(' + str(self[0])
s += ', '
if isinstance(self[1], Tensor):
if self[1].name and self[1].name is not None:
s += self[1].name
else:
s += 'tensor-' + hex(id(self[1]))
else:
s += str(self[1])
s += ', '
if isinstance(self[2], Tensor):
if self[2].name and self[2].name is not None:
s += self[2].name
else:
s += 'tensor-' + hex(id(self[2]))
else:
s += str(self[2])
s += ')'
return s
def __repr__(self):
return self.__str__()
def key(self):
"""
Returns a key for identifying the optree. The key depends on the ops
and the ids of the tensors. Since __eq__ is overloaded, the hashing of
the OpTreeNode has to be managed manually.
Returns:
tuple: optree key
"""
stack = self.traverse(list())
for i in range(len(stack)):
if type(stack[i]) is dict:
if 'axis' in stack[i]:
stack[i] = (stack[i]['op'], stack[i]['axis'])
else:
stack[i] = (stack[i]['op'])
return tuple(stack)
def intrinsic_key_maps(self):
"""
Returns the intrinsic key, tensor_index_map and index_tensor_map
for the purpose of identifying an optree. The key depends on the ops,
the tensor dimensions and the relations among the tensors.
x0 * x1 + x0 * x2 will have the same intrinsic key as y0 * y1 + y0 * y2,
if xi and yi have the same shape.
In tensor_index_map and index_tensor_map, tensors have a one-to-one
mapping with indices. The index of a tensor is determined by its first
occurrence in the post-order traversal of the optree.
Returns:
(intrinsic_key, tensor_index_map, index_tensor_map)
"""
stack = self.traverse(list())
tensor_index = 0
tensor_index_map = {}
index_tensor_map = {}
for i in range(len(stack)):
if type(stack[i]) is dict:
if 'axis' in stack[i]:
stack[i] = (stack[i]['op'], stack[i]['axis'])
else:
stack[i] = (stack[i]['op'])
elif isinstance(stack[i], Tensor):
# use integer indices in place of tensors
if stack[i] in tensor_index_map:
stack[i] = (tensor_index_map[stack[i]], stack[i].shape)
else:
# put tensor in dict
tensor_index_map[stack[i]] = tensor_index
index_tensor_map[tensor_index] = stack[i]
stack[i] = (tensor_index, stack[i].shape)
tensor_index += 1
return (tuple(stack), tensor_index_map, index_tensor_map)
@staticmethod
def build(op, a, b, out=None, **kwargs):
"""
Build OpTreeNode.
Arguments:
a (OpTreeNode, Tensor, numeric): left-hand side operand.
b (OpTreeNode, Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
not None, the op-tree will be executed.
kwargs: optional arguments such as the axis of a reduction.
"""
# check type
for arg in (a, b):
if not isinstance(arg, (int, float, Tensor, OpTreeNode, type(None))):
return NotImplemented
# get shape
out_shape = [1, 1]
if isinstance(a, (OpTreeNode, Tensor)):
a_shape = a.shape
elif isinstance(a, (float, int)):
a_shape = [1, 1]
else:
a_shape = [0, 0]
if isinstance(b, (OpTreeNode, Tensor)):
b_shape = b.shape
elif isinstance(b, (float, int)):
b_shape = [1, 1]
else:
b_shape = [0, 0]
# TODO: fix shape in smarter way
if len(a_shape) == 1:
a_shape = a_shape + (1,)
if len(b_shape) == 1:
b_shape = b_shape + (1,)
if op in OpCollection.ew_ops:
for i in range(2):
out_shape[i] = max(a_shape[i], b_shape[i])
elif op in OpCollection.reduction_ops:
if "axis" in kwargs:
out_shape = list(a_shape)
out_shape[kwargs["axis"]] = 1
else:
pass # [1, 1]
elif op == "assign":
out_shape = a_shape
elif op == "dot":
assert (len(a_shape) == len(b_shape) and len(b_shape) == 2 and
a_shape[1] == b_shape[0])
out_shape = (a_shape[0], b_shape[1])
elif op == "transpose":
assert b is None
out_shape = tuple(reversed(a_shape))
else:
raise TypeError("%s is not a valid operation" % op)
out_shape = tuple(out_shape)
# build op dict
op_dict = {"op": op, "shape": out_shape}
op_dict.update(kwargs)
node = OpTreeNode(op_dict, a, b)
# execute explicit assignment
if op == "assign":
return node.execute()
# passing in an out value counts as assignment
if out is not None:
return OpTreeNode({"op": "assign"}, out, node).execute()
# delay execution until assignment
return node
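# Shape-inference sketch (illustrative): an element-wise op on (4, 1) and (1, 8)
# operands broadcasts to (4, 8); a reduction with axis=1 over a (4, 8) operand
# yields (4, 1); "dot" of (4, 8) and (8, 2) yields (4, 2); "transpose" reverses
# the shape tuple.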
def execute(self):
"""
Execute the optree. When calling `execute()`, there must be one and only
one `assign` operation at the very top of the op-tree. The corresponding
backend's execute function will be called.
"""
assert(self[0]["op"] == "assign")
backend = self[1].backend
if isinstance(backend, Backend):
return backend.execute(self)
else:
raise NotImplementedError()
def traverse(self, stack):
"""
Post-order walk of the op-tree, producing a postfix stack.
Arguments:
stack (list): the caller should pass an empty list (e.g. `list()`); it is
then used recursively to accumulate the post-order stack.
"""
# Left
if isinstance(self[1], OpTreeNode):
self[1].traverse(stack)
elif self[1] is not None:
stack.append(self[1])
# Right
if isinstance(self[2], OpTreeNode):
self[2].traverse(stack)
elif self[2] is not None:
stack.append(self[2])
stack.append(self[0])
return stack
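# Post-order example (illustrative): traversing the op-tree for (x + y) * 2
# yields the postfix stack
#   [x, y, {'op': 'add', ...}, 2, {'op': 'mul', ...}]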
@property
def T(self):
return OpTreeNode.build("transpose", self, None)
def transpose(self, out=None):
"""
Return a transposed view of the data.
"""
if out:
return OpTreeNode.build("assign", out, self.T)
return self.T
@staticmethod
def optree_to_list(optree):
"""
convert optree to list of lists recursively
"""
if isinstance(optree, OpTreeNode):
return list(map(OpTreeNode.optree_to_list, optree))
else:
return optree
@staticmethod
def list_to_optree(l):
"""
convert list to optree recursively
"""
if isinstance(l, list):
return OpTreeNode(*map(OpTreeNode.list_to_optree, l))
else:
return l
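# The two helpers above are inverses used for (de)serialization, e.g.
#   OpTreeNode.list_to_optree(OpTreeNode.optree_to_list(tree))
# rebuilds an equivalent op-tree; tensors and op dicts are passed through
# untouched, only the tuple/list nesting is converted.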
@property
def shape(self):
"""
return the shape of the OpTreeNode
"""
if isinstance(self, OpTreeNode):
return self[0]['shape']
if isinstance(self, Tensor):
return self.shape
# scalar
return (1, 1)
@staticmethod
def _pretty_print(node):
operators = {'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'pow': '**'}
s = ''
if isinstance(node, Tensor):
if node.name:
s = node.name
else:
s = 'tensor-' + hex(id(node))
elif isinstance(node, OpTreeNode):
if node[2]:
s += OpTreeNode._pretty_print(node[1]) + ' '
if node[0]['op'] in operators:
s += operators[node[0]['op']]
else:
s += node[0]['op']
s += ' ' + OpTreeNode._pretty_print(node[2])
else:
s = node[0]['op'] + ' ' + OpTreeNode._pretty_print(node[1])
s = '(' + s + ')'
else:
s = str(node) # TODO
s = '(' + s + ')'
return s
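# Example output (illustrative): for tensors named 'x' and 'y',
#   ((x + y) * 2).pp()  ->  '((x + y) * (2))'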
def pp(self):
"""
Pretty print of the optree
Arguments:
node (OpTreeNode): the top node of the op-tree to print
Returns:
str: string representation of the op-tree
"""
return OpTreeNode._pretty_print(self)
def asnumpyarray(self):
"""
Returns the evaluated value of the optree as a host numpy.ndarray.
Allocates new memory, usually used for debug.
Returns:
numpy.ndarray: evaluated value
"""
return self.astensor().get()
def astensor(self):
"""
Returns the evaluated value of the optree as a Tensor.
Allocates new memory, usually used for debug.
Returns:
Tensor: evaluated value
"""
stack = self.traverse(list())
be = None
for s in stack:
if isinstance(s, Tensor):
be = s.backend
break
if be is None:
raise ValueError("No tensor object in op_tree")
buf = be.empty(self.shape)
buf[:] = self
return buf
def __add__(self, other):
return self.build("add", self, other)
def __sub__(self, other):
return self.build("sub", self, other)
def __mul__(self, other):
return self.build("mul", self, other)
def __div__(self, other):
return self.build("div", self, other)
def __truediv__(self, other):
return self.build("div", self, other)
def __pow__(self, other):
return self.build("pow", self, other)
def __radd__(self, other):
return self.build("add", other, self)
def __rsub__(self, other):
return self.build("sub", other, self)
def __rmul__(self, other):
return self.build("mul", other, self)
def __rdiv__(self, other):
return self.build("div", other, self)
def __rtruediv__(self, other):
return self.build("div", other, self)
def __rpow__(self, other):
return self.build("pow", other, self)
def __eq__(self, other):
return self.build("eq", self, other)
def __ne__(self, other):
return self.build("ne", self, other)
def __lt__(self, other):
return self.build("lt", self, other)
def __le__(self, other):
return self.build("le", self, other)
def __gt__(self, other):
return self.build("gt", self, other)
def __ge__(self, other):
return self.build("ge", self, other)
def __abs__(self):
return self.build("abs", self, None)
def __neg__(self):
return self.build("neg", self, None)
| apache-2.0 | 6,893,616,573,040,554,000 | 37.947764 | 94 | 0.535217 | false |
amlyj/pythonStudy | 3.6/AI/ML/DL/study_face_recognition/facerec_from_video_file_faster.py | 1 | 3084 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/12/27 11:47
# @Author : TOM.LEE
import cv2
import face_recognition
video_capture = cv2.VideoCapture("../videos/hamilton_clip.mp4")
# Load some sample pictures and learn how to recognize them.
lmm_image = face_recognition.load_image_file("../images/lin-manuel-miranda.png")
lmm_face_encoding = face_recognition.face_encodings(lmm_image)[0]
al_image = face_recognition.load_image_file("../images/alex-lacamoire.png")
al_face_encoding = face_recognition.face_encodings(al_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
lmm_face_encoding,
al_face_encoding
]
known_face_names = [
"Lin-Manuel Miranda",
"Alex Lacamoire"
]
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
| mit | -3,399,066,312,801,603,600 | 32.89011 | 101 | 0.651751 | false |
sumihai-tekindo/account_sicepat | invoice_paid_report/report/__init__.py | 1 | 1053 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 STI (<https://github.com/sumihai-tekindo>).
# @author Pambudi Satria <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import invoice_paid_report | gpl-3.0 | 4,524,028,460,565,847,600 | 45.954545 | 78 | 0.594492 | false |
gwh59/cloud-custodian | c7n/resources/cache.py | 1 | 1067 | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n.query import QueryResourceManager
from c7n.manager import resources
@resources.register('cache-cluster')
class CacheCluster(QueryResourceManager):
resource_type = 'aws.elasticache.cluster'
@resources.register('cache-subnet-group')
class ClusterSubnetGroup(QueryResourceManager):
resource_type = 'aws.elasticache.subnet-group'
@resources.register('cache-snapshot')
class CacheSnapshot(QueryResourceManager):
resource_type = 'aws.elasticache.snapshot'
| apache-2.0 | -6,840,067,951,317,144,000 | 29.485714 | 74 | 0.778819 | false |
prior/webinars | webinars_web/webinars/management/commands/seed.py | 1 | 2093 | from django.core.management.base import BaseCommand
from optparse import make_option
from webinars_web.webinars import models as wm
from uuid import uuid4
from webex.attendee import Attendee as WebexRegistrant
import hapi_plus.leads
from django.conf import settings
class Command(BaseCommand):
help = 'Seeds registrant data for an event'
option_list = BaseCommand.option_list + (
make_option('-e', '--event', type='int', dest='event_id', help=
'The local id for a specific webinar event to seed.'),
make_option('-w', '--webex_count', type='int', dest='webex_count', help=
'Number of Webex registrants to seed on this event'),
make_option('-s', '--hubspot_count', type='int', dest='hubspot_count', help=
'Number of HubSpot registrants to seed on this event') )
def handle(self, *args, **options):
event_id = options.get('event_id')
webex_count = options.get('webex_count') or 0
hubspot_count = options.get('hubspot_count') or 0
event = wm.Event.objects.get(pk=event_id)
print "bulk inserting %s webex registrants" % webex_count
webex_event = event.webex_event
event.webex_event.create_registrants(WebexRegistrant.random(webex_event, webex_count))
leads_client = hapi_plus.leads.LeadsClient(settings.HUBSPOT_API_KEY, hub_id=event.hub.id, env=settings.API_ENV, timeout=20)
print "incrementally inserting %s hubspot registrants" % hubspot_count
for i in xrange(hubspot_count):
form_values = []
form_values.append({'fieldLabel':'Email Address', 'fieldName':'Email', 'fieldValue': ('%s@%s.com' % (str(uuid4())[:8], str(uuid4())[:8]))})
form_values.append({'fieldLabel':'First Name', 'fieldName':'FirstName', 'fieldValue': str(uuid4())[:8]})
form_values.append({'fieldLabel':'Last Name', 'fieldName':'LastName', 'fieldValue': str(uuid4())[:8]})
leads_client.create_lead(event.update_cms_form.guid, form_values)
print "inserted %s hubspot registrants" % i
| apache-2.0 | -8,942,040,098,241,600,000 | 48.833333 | 151 | 0.652174 | false |
tyrchen/church | church/views/users.py | 1 | 4675 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from dateutil.parser import parse
import json
import logging
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView, View
import requests
from settings import API_SERVER
__author__ = 'tchen'
logger = logging.getLogger(__name__)
last_updater_ignore = ['gnats', 'slt-builder', 'JASMINE Notification <', 'builder']
class UserView(TemplateView):
template_name = 'church/user.html'
def get_user(self, uid):
return requests.get(API_SERVER + '/directory/employees/%s.json' % uid).json()
def action_required(self, item):
updater = item.get('last_updater', '')
modified = parse(item['modified_at']).replace(tzinfo=None)
now = datetime.now()
if updater not in last_updater_ignore and updater != item['dev_owner'] and (now - modified).days < 5 and \
item['responsible'] == item['dev_owner']:
return True
return False
def get_pr_list(self, uid):
data = requests.get(API_SERVER + '/gnats/%s.json' % uid).json()
action_required_issues = []
new_issues = []
working_issues = []
info_issues = []
done_issues = []
for item in data:
if self.action_required(item):
action_required_issues.append(item)
elif item['state'] == 'open':
new_issues.append(item)
elif item['responsible'] == uid:
working_issues.append(item)
elif item['state'] == 'feedback' or item['state'] == 'monitored':
done_issues.append(item)
else:
info_issues.append(item)
return [
('Action Required Iusses', action_required_issues),
('Open Issues', new_issues),
('Working Issues', working_issues),
('Info Issues', info_issues),
('Done Issues (Monitored, Feedback)', done_issues)
]
def get_context_data(self, **kwargs):
uid = self.kwargs['text']
issue_lists = self.get_pr_list(uid)
user = self.get_user(uid)
context = super(UserView, self).get_context_data(**kwargs)
context['issue_lists'] = issue_lists
context['engineer'] = user
context['total'] = sum(map(lambda x: len(x[1]), issue_lists))
return context
def post(self, request, *args, **kwargs):
uid = self.kwargs['text']
user = self.get_user(uid)
number = request.POST.get('pk')
if request.user.username == uid:
name = request.POST.get('name')
value = request.POST.get('value')
url = API_SERVER + '/gnats/issues/%s.json' % number
data = {'name': name, 'value': value}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.put(url, data=json.dumps(data), headers=headers)
if r.status_code == 200:
if name == 'comment':
# user updated the comment, so we add a progress record
progress_url = API_SERVER + '/gnats/progresses/%s.json' % number
data = {'uid': uid, 'progress': value, 'team': user['team']}
r = requests.post(progress_url, data=json.dumps(data), headers=headers)
return HttpResponse('{}')
else:
return HttpResponseBadRequest('Cannot update PR %s' % number)
return HttpResponseBadRequest('Cannot update PR %s' % number)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(UserView, self).dispatch(*args, **kwargs)
class UserAddWorkingPRView(View):
url = 'http://scrapy.jcnrd.us/schedule.json'
def post(self, request, *args, **kwargs):
items = request.POST.get('items')
if request.user.username == self.kwargs['text']:
items = map(lambda x: x.strip(), items.split(','))
for item in items:
payload = {'project': 'gnats', 'spider': 'worker_pr', 'uid': request.user.username, 'number': item}
requests.post(self.url, payload)
return HttpResponse(json.dumps({'status': 'ok'}))
return HttpResponseBadRequest('Cannot update PR %s' % items)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(UserAddWorkingPRView, self).dispatch(*args, **kwargs)
| mit | 3,288,241,117,156,940,000 | 37.00813 | 115 | 0.589947 | false |
dennishuo/dataproc-initialization-actions | oozie/test_oozie.py | 1 | 1805 | import os
import unittest
from parameterized import parameterized
from integration_tests.dataproc_test_case import DataprocTestCase
class OozieTestCase(DataprocTestCase):
COMPONENT = 'oozie'
INIT_ACTIONS = ['oozie/oozie.sh']
TEST_SCRIPT_FILE_NAME = 'validate.sh'
def verify_instance(self, name):
test_script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
self.TEST_SCRIPT_FILE_NAME)
self.upload_test_file(test_script_path, name)
self.__run_test_file(name)
self.remove_test_script(self.TEST_SCRIPT_FILE_NAME, name)
def __run_test_file(self, name):
self.assert_instance_command(
name, "bash {}".format(self.TEST_SCRIPT_FILE_NAME))
@parameterized.expand(
[
("SINGLE", "1.1", ["m"]),
("SINGLE", "1.2", ["m"]),
("SINGLE", "1.3", ["m"]),
("STANDARD", "1.1", ["m"]),
("STANDARD", "1.2", ["m"]),
("STANDARD", "1.3", ["m"]),
("HA", "1.1", ["m-0", "m-1", "m-2"]),
("HA", "1.2", ["m-0", "m-1", "m-2"]),
("HA", "1.3", ["m-0", "m-1", "m-2"]),
],
testcase_func_name=DataprocTestCase.generate_verbose_test_name)
def test_oozie(self, configuration, dataproc_version, machine_suffixes):
self.createCluster(configuration,
self.INIT_ACTIONS,
dataproc_version,
machine_type="n1-standard-4",
boot_disk_size="200GB")
for machine_suffix in machine_suffixes:
self.verify_instance("{}-{}".format(self.getClusterName(),
machine_suffix))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,328,313,559,347,165,000 | 34.392157 | 76 | 0.516343 | false |
TheLazyHase/dragon_dice_simulator | controller/user/authentication_controller.py | 1 | 1910 | # -*- coding: utf-8 *-*
# Copyright (c) 2013 Tisserant Pierre
#
# This file is part of Dragon dice simulator.
#
# Dragon dice simulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragon dice simulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Dragon dice simulator. If not, see <http://www.gnu.org/licenses/>.
from pyramid.view import view_config,forbidden_view_config
from pyramid.security import remember, forget
from controller import BaseController
from pyramid.httpexceptions import HTTPFound
from business.user import User
class ArmiesController(BaseController):
@view_config(route_name='authentication')
def authentication(self):
user_id = int(self.request.POST['id'])
user = User(user_id)
headers = remember(self.request, user.id)
url = self.request.route_url('army_selection')
return HTTPFound(location=url, headers = headers)
@view_config(route_name='login', renderer='controller.user:templates/login.mako')
@forbidden_view_config(renderer='controller.user:templates/failed_login.mako')
def login(self):
return {'authentication_route': self.request.route_url('authentication'), 'user_ids': range(5)}
@view_config(route_name='logout')
def logout(self):
        headers = forget(self.request)
url = self.request.route_url('login')
return HTTPFound(location=url, headers = headers)
| gpl-3.0 | 2,533,934,773,703,738,400 | 41.444444 | 103 | 0.719372 | false |
veprbl/wikichords | show.py | 1 | 9484 | #!/usr/bin/env python
from common import *
from time import time, ctime
session_start()
urlname = args[1]
res = con.execute(
"SELECT id, band, name, contents, variant FROM pages WHERE urlname = ? LIMIT 1;",
(urlname,)).fetchall()
if len(res) == 0:
fault404()
else:
(pid, band, name, contents, variant) = res[0]
if has_param('text'):
if get_uid() == 0:
fault403()
newtext = get_param('text')
newtext = ''.join(map(lambda s: s.rstrip() + "\n", newtext.splitlines()))
res = con.execute("SELECT date, author FROM variants WHERE date > ? AND page = ? ORDER BY date DESC LIMIT 1;", (int(time()) - 3600, pid)).fetchall()
con.execute("BEGIN TRANSACTION;")
if len(res) > 0 and res[0][1] == get_uid():
(date, _) = res[0]
con.execute("UPDATE variants SET date = ?, text = ? WHERE page = ? AND date = ?;", \
(int(time()), newtext, pid, date))
con.execute("UPDATE pages SET contents = ? WHERE id = ?;", \
(newtext, pid))
else:
res = con.execute(
"INSERT INTO variants (page, date, author, text) VALUES (?, ?, ?, ?)", \
(pid, int(time()), get_uid(), newtext)
)
con.execute("UPDATE pages SET contents = ?, variant = ? WHERE id = ?;", \
(newtext, res.lastrowid, pid))
con.commit()
locate("/song/%s" % urlname)
vartime = 0
if len(args) in [3, 4] and (args[2] != 'edit' and args[2] != 'rename'):
vartime = int(args[2])
res = con.execute(
"SELECT id, text FROM variants WHERE page = ? AND date = ? LIMIT 1;",
(pid, vartime)).fetchall()
if len(res) == 0:
fault404()
else:
(vid, contents) = res[0]
if len(args) == 4 and args[3] == 'rollback':
if get_uid() == 0:
fault403()
con.execute("BEGIN TRANSACTION;")
con.execute("DELETE FROM variants WHERE date > ? AND page = ?;", \
(vartime, pid))
con.execute("UPDATE pages SET contents = ?, variant = ? WHERE id = ?;",\
(contents, vid, pid));
con.commit()
locate("/song/%s" % urlname)
if len(args) == 4 and args[2] == 'rename':
if get_uid() == 0:
fault403()
name = args[3]
urlname = mk_urlname(name)
con.execute("UPDATE pages SET name = ?, urlname = ? WHERE id = ?;", \
(name, urlname, pid))
con.commit()
locate("/song/%s" % urlname);
root = Element('root')
songtext = Element('songtext', { 'name' : name, 'href' : "/song/%s" % urlname, 'band' : getbandname(band), 'band_href' : "/band/%s" % getbandurlname(band) })
root.append(songtext)
def replace_node(line, find_proc, mk_node):
if type(line) == list:
result = []
for v in line:
if type(v) == str:
result += replace_node(v, find_proc, mk_node)
else:
result.append(v)
return result
(index, param, exclude_len) = find_proc(line)
if index >= 0:
return [line[:index], mk_node(param)] + \
replace_node(line[index+exclude_len:], find_proc, mk_node)
else:
return [line]
def parse_chords(line):
if line == '':
return []
def is_valid_chord_char(c):
return c.lower() in ['m', 's', 'u', 's', 'a', 'd',
'1', '2', '3', '4', '5', '6', '7', '9',
'#', '+', '-', '/', '(', ')']
if all(map(lambda c: is_valid_chord_char(c) or c.isspace() \
or c in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', '|'], line)):
chordline_node = Element('l')
def find_proc(line):
from genchord import parse_chord, notes
indexes = filter(lambda i: i >= 0,
map(lambda note: line.find(note), notes.keys()))
if len(indexes) == 0: return (-1, None, 0)
index = min(indexes)
			parse_info = parse_chord(line[index:])
			if parse_info is not None:
				length = parse_info[0]
				return (index, line[index:index+length], length)
			# parse_chord() could not recognise the chord: mark the rest of the line as unknown
			l = len(line)
			unknown_node = Element('unknownchord')
			unknown_node.text = line[index:l]
			return (index, unknown_node, l-index)
def mk_node(param):
if type(param) in (str, unicode):
chord_node = Element('c')
chord_node.text = param
return chord_node
else:
return param
a = replace_node(line, find_proc, mk_node)
insert_nodelist(chordline_node, a)
return [chordline_node]
nodes = []
pos = line.find("|", 0) + 1
end = line.find("|", pos)
prev = 0
chordline = []
while end != -1:
try:
chord = line[pos:end]
chord_id = ['Am'].index(chord)
line = line[:pos-1] + line[end+1:]
chordline.append(" " * (pos - 2 - prev))
chord_node = Element('c')
chord_node.text = chord
chordline.append(chord_node)
end = pos
prev = pos + len(chord) - 2
except ValueError:
pass
pos = end + 1
end = line.find("|", pos)
if len(chordline) > 0:
chordline_node = Element('l')
insert_nodelist(chordline_node, chordline)
nodes.append(chordline_node)
text_node = Element('v')
text_node.text = line.replace("\t", "xxTABx")
nodes.append(text_node)
return nodes
def insert_nodelist(elem, nodelist):
f = True
for node in nodelist:
if type(node) in (str, unicode):
if f == True:
if elem.text is None:
elem.text = node
else:
elem.text += node
else:
f.tail = node
else:
if isinstance(node, getElementClass()):
f = node
elem.append(node)
else:
raise Exception(type(node))
def split_verses(line_array):
# Search for first non-empty line
start = 0
while start < len(line_array) and line_array[start] == '':
start += 1
if start == len(line_array):
return
# Search for the verse ending (empty line)
# Assuming that line_array is right stripped
try:
end = start + line_array[start:].index('')
except ValueError:
end = len(line_array)
verse_node = Element('verse')
songtext.append(verse_node)
insert_nodelist(verse_node,
sum(map(lambda s: parse_chords(s), line_array[start:end]), [])
)
if end != len(line_array):
split_verses(line_array[end:])
split_verses(map(lambda s: s.rstrip(), contents.splitlines()))
lastchanges = Element('lastchanges')
root.append(lastchanges)
res = con.execute("SELECT id, date, author FROM variants WHERE page = ?", \
(pid,))
for (vid, date, author) in res:
selected = date == vartime or (vid == variant and vartime == 0)
commit = Element('commit',
attrib = { 'author' : get_username(author),
'url' : "/song/%s/%i" % (urlname, date),
'rollback_url' : "/song/%s/%i/rollback" % (urlname, date),
'date' : ctime(date),
'selected' : "yes" if selected else "no"})
lastchanges.append(commit)
if len(args) == 3 and args[2] == 'edit':
editform = Element('editform', attrib = { 'id' : str(pid) })
editform.text = contents.replace('\r\n', '\n')
root.append(editform)
if has_param('file'):
if get_uid() == 0:
fault403()
fmt = {
'\x1b\x00T\x00u\x00x\x00G\x00u\x00i\x00t\x00a\x00r\x00 \x00F\x00i\x00l\x00e\x00 \x00F\x00o\x00r\x00m\x00a\x00t\x00' : 'tuxguitar',
'\x18FICHIER GUITAR PRO' : 'guitarpro'
}
fmt_ext = {
'tuxguitar' : ['.tg'],
'guitarpro' : ['.gtp', '.gp3', '.gp4', '.gp5']
}
fp = form['file'].file
maxlen = max(map(len, fmt.keys()))
fingerprint = fp.read(maxlen)
format = None
for f in fmt:
if fingerprint[:len(f)] == f:
format = fmt[f]
break
if format is None:
die("Unknown file format!")
ext = path.splitext(form['file'].filename)[1].lower()
if ext not in fmt_ext[format]:
die("Wrong file extension!")
fp.seek(0)
from os import path
# user_urlname = mk_urlname(get_username(get_uid())).replace('/', '')
filemask = "%s_%%02i%s" % (urlname, ext)
pathmask = "uploads/%s" % filemask
i = 0
while path.isfile(pathmask % i):
i += 1
file(pathmask % i, "w").write(fp.read())
con.execute("INSERT INTO files (page, author, filename) VALUES (?, ?, ?);", \
(pid, get_uid(), filemask % i))
con.commit()
locate('/song/%s' % urlname)
attachments_node = None
res = con.execute("SELECT author, filename FROM files WHERE page = ?;",
(pid,))
for (author, filename) in res:
if attachments_node is None:
attachments_node = Element('attachments')
root.append(attachments_node)
file_node = Element('file',
attrib = { 'author' : get_username(author),
'url' : "/uploads/%s" % filename })
attachments_node.append(file_node);
output_root(root)
| mit | 5,521,724,893,140,749,000 | 29.692557 | 158 | 0.512653 | false |
GoogleCloudPlatform/declarative-resource-client-library | python/services/apigee/environment.py | 1 | 8322 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.apigee import environment_pb2
from google3.cloud.graphite.mmv2.services.google.apigee import environment_pb2_grpc
from typing import List
class Environment(object):
def __init__(
self,
name: str = None,
description: str = None,
created_at: int = None,
last_modified_at: int = None,
properties: dict = None,
display_name: str = None,
state: str = None,
organization: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.description = description
self.properties = properties
self.display_name = display_name
self.organization = organization
self.service_account_file = service_account_file
def apply(self):
stub = environment_pb2_grpc.ApigeeEnvironmentServiceStub(channel.Channel())
request = environment_pb2.ApplyApigeeEnvironmentRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if EnvironmentProperties.to_proto(self.properties):
request.resource.properties.CopyFrom(
EnvironmentProperties.to_proto(self.properties)
)
else:
request.resource.ClearField("properties")
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.organization):
request.resource.organization = Primitive.to_proto(self.organization)
request.service_account_file = self.service_account_file
response = stub.ApplyApigeeEnvironment(request)
self.name = Primitive.from_proto(response.name)
self.description = Primitive.from_proto(response.description)
self.created_at = Primitive.from_proto(response.created_at)
self.last_modified_at = Primitive.from_proto(response.last_modified_at)
self.properties = EnvironmentProperties.from_proto(response.properties)
self.display_name = Primitive.from_proto(response.display_name)
self.state = EnvironmentStateEnum.from_proto(response.state)
self.organization = Primitive.from_proto(response.organization)
def delete(self):
stub = environment_pb2_grpc.ApigeeEnvironmentServiceStub(channel.Channel())
request = environment_pb2.DeleteApigeeEnvironmentRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if EnvironmentProperties.to_proto(self.properties):
request.resource.properties.CopyFrom(
EnvironmentProperties.to_proto(self.properties)
)
else:
request.resource.ClearField("properties")
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.organization):
request.resource.organization = Primitive.to_proto(self.organization)
response = stub.DeleteApigeeEnvironment(request)
@classmethod
def list(self, organization, service_account_file=""):
stub = environment_pb2_grpc.ApigeeEnvironmentServiceStub(channel.Channel())
request = environment_pb2.ListApigeeEnvironmentRequest()
request.service_account_file = service_account_file
request.Organization = organization
return stub.ListApigeeEnvironment(request).items
def to_proto(self):
resource = environment_pb2.ApigeeEnvironment()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if EnvironmentProperties.to_proto(self.properties):
resource.properties.CopyFrom(
EnvironmentProperties.to_proto(self.properties)
)
else:
resource.ClearField("properties")
if Primitive.to_proto(self.display_name):
resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.organization):
resource.organization = Primitive.to_proto(self.organization)
return resource
class EnvironmentProperties(object):
def __init__(self, property: list = None):
self.property = property
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ApigeeEnvironmentProperties()
if EnvironmentPropertiesPropertyArray.to_proto(resource.property):
res.property.extend(
EnvironmentPropertiesPropertyArray.to_proto(resource.property)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentProperties(
property=EnvironmentPropertiesPropertyArray.from_proto(resource.property),
)
class EnvironmentPropertiesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentProperties.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentProperties.from_proto(i) for i in resources]
class EnvironmentPropertiesProperty(object):
def __init__(self, name: str = None, value: str = None):
self.name = name
self.value = value
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ApigeeEnvironmentPropertiesProperty()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.value):
res.value = Primitive.to_proto(resource.value)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentPropertiesProperty(
name=Primitive.from_proto(resource.name),
value=Primitive.from_proto(resource.value),
)
class EnvironmentPropertiesPropertyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentPropertiesProperty.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentPropertiesProperty.from_proto(i) for i in resources]
class EnvironmentStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return environment_pb2.ApigeeEnvironmentStateEnum.Value(
"ApigeeEnvironmentStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return environment_pb2.ApigeeEnvironmentStateEnum.Name(resource)[
len("ApigeeEnvironmentStateEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
| apache-2.0 | -8,753,835,329,695,119,000 | 34.716738 | 86 | 0.66883 | false |
igorcoding/asynctnt | asynctnt/exceptions.py | 1 | 4098 | import enum
class TarantoolError(Exception):
"""
Base Tarantool Exception class
"""
pass
class TarantoolSchemaError(TarantoolError):
"""
Exception is raised when any problems with schema occurred
"""
pass
class TarantoolDatabaseError(TarantoolError):
"""
Exception is raised when Tarantool responds with code != 0
"""
def __init__(self, code, message):
super(TarantoolDatabaseError, self).__init__(code, message)
self.code = code
self.message = message
class TarantoolNetworkError(TarantoolError):
pass
class TarantoolNotConnectedError(TarantoolNetworkError):
"""
Raised when asynctnt is not connected to Tarantool
"""
pass
class ErrorCode(enum.IntEnum):
"""
Tarantool default error codes
"""
ER_UNKNOWN = 0
ER_ILLEGAL_PARAMS = 1
ER_MEMORY_ISSUE = 2
ER_TUPLE_FOUND = 3
ER_TUPLE_NOT_FOUND = 4
ER_UNSUPPORTED = 5
ER_NONMASTER = 6
ER_READONLY = 7
ER_INJECTION = 8
ER_CREATE_SPACE = 9
ER_SPACE_EXISTS = 10
ER_DROP_SPACE = 11
ER_ALTER_SPACE = 12
ER_INDEX_TYPE = 13
ER_MODIFY_INDEX = 14
ER_LAST_DROP = 15
ER_TUPLE_FORMAT_LIMIT = 16
ER_DROP_PRIMARY_KEY = 17
ER_KEY_PART_TYPE = 18
ER_EXACT_MATCH = 19
ER_INVALID_MSGPACK = 20
ER_PROC_RET = 21
ER_TUPLE_NOT_ARRAY = 22
ER_FIELD_TYPE = 23
ER_FIELD_TYPE_MISMATCH = 24
ER_SPLICE = 25
ER_ARG_TYPE = 26
ER_TUPLE_IS_TOO_LONG = 27
ER_UNKNOWN_UPDATE_OP = 28
ER_UPDATE_FIELD = 29
ER_FIBER_STACK = 30
ER_KEY_PART_COUNT = 31
ER_PROC_LUA = 32
ER_NO_SUCH_PROC = 33
ER_NO_SUCH_TRIGGER = 34
ER_NO_SUCH_INDEX = 35
ER_NO_SUCH_SPACE = 36
ER_NO_SUCH_FIELD = 37
ER_EXACT_FIELD_COUNT = 38
ER_INDEX_FIELD_COUNT = 39
ER_WAL_IO = 40
ER_MORE_THAN_ONE_TUPLE = 41
ER_ACCESS_DENIED = 42
ER_CREATE_USER = 43
ER_DROP_USER = 44
ER_NO_SUCH_USER = 45
ER_USER_EXISTS = 46
ER_PASSWORD_MISMATCH = 47
ER_UNKNOWN_REQUEST_TYPE = 48
ER_UNKNOWN_SCHEMA_OBJECT = 49
ER_CREATE_FUNCTION = 50
ER_NO_SUCH_FUNCTION = 51
ER_FUNCTION_EXISTS = 52
ER_FUNCTION_ACCESS_DENIED = 53
ER_FUNCTION_MAX = 54
ER_SPACE_ACCESS_DENIED = 55
ER_USER_MAX = 56
ER_NO_SUCH_ENGINE = 57
ER_RELOAD_CFG = 58
ER_CFG = 59
ER_VINYL = 60
ER_LOCAL_SERVER_IS_NOT_ACTIVE = 61
ER_UNKNOWN_SERVER = 62
ER_CLUSTER_ID_MISMATCH = 63
ER_INVALID_UUID = 64
ER_CLUSTER_ID_IS_RO = 65
ER_SERVER_ID_MISMATCH = 66
ER_SERVER_ID_IS_RESERVED = 67
ER_INVALID_ORDER = 68
ER_MISSING_REQUEST_FIELD = 69
ER_IDENTIFIER = 70
ER_DROP_FUNCTION = 71
ER_ITERATOR_TYPE = 72
ER_REPLICA_MAX = 73
ER_INVALID_XLOG = 74
ER_INVALID_XLOG_NAME = 75
ER_INVALID_XLOG_ORDER = 76
ER_NO_CONNECTION = 77
ER_TIMEOUT = 78
ER_ACTIVE_TRANSACTION = 79
ER_NO_ACTIVE_TRANSACTION = 80
ER_CROSS_ENGINE_TRANSACTION = 81
ER_NO_SUCH_ROLE = 82
ER_ROLE_EXISTS = 83
ER_CREATE_ROLE = 84
ER_INDEX_EXISTS = 85
ER_TUPLE_REF_OVERFLOW = 86
ER_ROLE_LOOP = 87
ER_GRANT = 88
ER_PRIV_GRANTED = 89
ER_ROLE_GRANTED = 90
ER_PRIV_NOT_GRANTED = 91
ER_ROLE_NOT_GRANTED = 92
ER_MISSING_SNAPSHOT = 93
ER_CANT_UPDATE_PRIMARY_KEY = 94
ER_UPDATE_INTEGER_OVERFLOW = 95
ER_GUEST_USER_PASSWORD = 96
ER_TRANSACTION_CONFLICT = 97
ER_UNSUPPORTED_ROLE_PRIV = 98
ER_LOAD_FUNCTION = 99
ER_FUNCTION_LANGUAGE = 100
ER_RTREE_RECT = 101
ER_PROC_C = 102
ER_UNKNOWN_RTREE_INDEX_DISTANCE_TYPE = 103
ER_PROTOCOL = 104
ER_UPSERT_UNIQUE_SECONDARY_KEY = 105
ER_WRONG_INDEX_RECORD = 106
ER_WRONG_INDEX_PARTS = 107
ER_WRONG_INDEX_OPTIONS = 108
ER_WRONG_SCHEMA_VERSION = 109
ER_SLAB_ALLOC_MAX = 110
ER_WRONG_SPACE_OPTIONS = 111
ER_UNSUPPORTED_INDEX_FEATURE = 112
ER_VIEW_IS_RO = 113
ER_SERVER_UUID_MISMATCH = 114
ER_SYSTEM = 115
ER_LOADING = 116
ER_CONNECTION_TO_SELF = 117
ER_KEY_PART_IS_TOO_LONG = 118
ER_COMPRESSION = 119
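# Minimal usage sketch (illustrative addition, not part of asynctnt itself): it only
# exercises the exception classes and ErrorCode enum defined above; the raise below
# stands in for whatever call actually talks to Tarantool.
def _describe_tarantool_error(exc):
    """Return a human-readable description for the exceptions defined above."""
    if isinstance(exc, TarantoolDatabaseError):
        if exc.code == ErrorCode.ER_NO_SUCH_SPACE:
            return "space does not exist: %s" % exc.message
        return "database error %d: %s" % (exc.code, exc.message)
    if isinstance(exc, TarantoolNotConnectedError):
        return "not connected to Tarantool, retry later"
    return "tarantool error: %s" % exc
if __name__ == '__main__':
    try:
        raise TarantoolDatabaseError(ErrorCode.ER_NO_SUCH_SPACE, "Space 'users' does not exist")
    except TarantoolError as error:
        print(_describe_tarantool_error(error))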
| apache-2.0 | 3,441,161,504,051,915,000 | 24.296296 | 67 | 0.624451 | false |
wxdwfc/security_lab | exploit-template.py | 1 | 1889 | #!/usr/bin/python
import sys
import socket
import traceback
import urllib
import struct
####
## You might find it useful to define variables that store various
## stack or function addresses from the zookd / zookfs processes,
## which you can then use in build_exploit(); the following are just
## examples.
stack_buffer = 0x34567890
stack_saved_ebp = 0x12345678
stack_retaddr = stack_saved_ebp + 4
## This is the function that you should modify to construct an
## HTTP request that will cause a buffer overflow in some part
## of the zookws web server and exploit it.
def build_exploit(shellcode):
## Things that you might find useful in constructing your exploit:
## urllib.quote(s)
## returns string s with "special" characters percent-encoded
## struct.pack("<I", x)
## returns the 4-byte binary encoding of the 32-bit integer x
## variables for program addresses (ebp, buffer, retaddr=ebp+4)
req = "GET /test HTTP/1.0\r\n" + \
"\r\n"
return req
####
def send_req(host, port, req):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to %s:%d..." % (host, port))
sock.connect((host, port))
print("Connected, sending request...")
sock.send(req)
print("Request sent, waiting for reply...")
rbuf = sock.recv(1024)
resp = ""
while len(rbuf):
resp = resp + rbuf
rbuf = sock.recv(1024)
print("Received reply.")
sock.close()
return resp
####
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " host port")
exit()
try:
shellfile = open("shellcode.bin", "r")
shellcode = shellfile.read()
req = build_exploit(shellcode)
print("HTTP request:")
print(req)
resp = send_req(sys.argv[1], int(sys.argv[2]), req)
print("HTTP response:")
print(resp)
except:
print("Exception:")
print(traceback.format_exc())
| mit | 4,728,255,385,788,143,000 | 24.186667 | 70 | 0.651668 | false |
dochevviktor/coding_challenges | DFCL2-KeyFinder.py | 1 | 2118 | import time
import sys
"""
Goal here is to decrypt cipher by brute force
Using bytes seemed to improve speed to a certain degree
"""
cipher =[0x5499fa99, 0x1ee7d8da, 0x5df0b78b, 0x1cb0c18c, 0x10f09fc5, 0x4bb7fdae, 0x7fcb95ac,
0xe494fbae, 0x8f5d90a3, 0xc766fdd7, 0xb7399ecc, 0xbf4af592, 0xf35c9dc2, 0x272be2a4,
0x5e788697, 0x520febd8, 0x468c808c, 0x2e550ac9, 0x2b4d28b7, 0x4c166789, 0x33df0bec,
0x67a96778, 0x0ffa0ce3, 0x44cd2a9a, 0x2dc208dc, 0x35c26a9d, 0x658b0fd7, 0x0d006482,
0x46c90cf8, 0x28d72a79, 0x4ea94be5, 0x1bbc6995, 0x478505d3, 0x7b1a6b8d, 0xaf7408db,
0xef7d7f9f, 0x76471cc6, 0xef1076b4, 0x6c911aa7, 0xe75a7ed3, 0x89630c8d, 0xf32b7fcb,
0x697c1e89, 0x091c30be, 0x736a4cbf, 0xe27339bb, 0x9a2a52a2]
text = [""]*46
try:
i1 = int(sys.argv[1])
except:
print ("A number from 0 to 3 needs to be specified as an argument (use DFCL2-Pypy Launcher.bat)!")
sys.exit()
answer = ""
test = "2074686520" # = (space)the(space)
flag = 0x0
# Deny Range (for now its easier to check, mmk ?)
deny = {"0"}
for i in xrange(0,31):
deny.add(hex(i).lstrip("0x"))
for i in xrange(121,256):
deny.add(hex(i).lstrip("0x"))
deny = frozenset(deny)
program_starts = time.time()
# 0x2710 = 10000
# 0x7530 = 30000
# 0xc350 = 50000
iter_print = 0xc350 + i1
while i1 < 0xFFFFFFFF:
if i1 % iter_print == 0:
#every n'th iteration, print progress and speed
now = time.time()
print("%.2f" % (((float(i1)+0.000001)/0xFFFFFFFF)*0x64))+"% - ",("%.2f" % (now - program_starts)) , "ms"
program_starts = time.time()
for i in xrange(0x2e,0x0,-0x1):
a = hex(((cipher[i-1] + i1) % 0x100000000)^cipher[i])[0x2:0xa]
# This will reject most keys that produce forbidden characters, but not all
if a in deny:
break
flag = 0x1
text[i-0x1] = a
if flag == 0x1:
if test in "".join(text):
f = open('test.txt', 'a')
f.write(str(i1)+"\n")
f.close()
print "Possible Keys are: "+str(i1)
flag << 0x4
i1+=4
print answer
| mit | 8,956,394,434,768,525,000 | 33.16129 | 112 | 0.629367 | false |
eriklindernoren/Keras-GAN | pixelda/data_loader.py | 1 | 3259 | import scipy
from glob import glob
import numpy as np
from keras.datasets import mnist
from skimage.transform import resize as imresize
import pickle
import os
import urllib
import gzip
class DataLoader():
"""Loads images from MNIST (domain A) and MNIST-M (domain B)"""
def __init__(self, img_res=(128, 128)):
self.img_res = img_res
self.mnistm_url = 'https://github.com/VanushVaswani/keras_mnistm/releases/download/1.0/keras_mnistm.pkl.gz'
self.setup_mnist(img_res)
self.setup_mnistm(img_res)
def normalize(self, images):
return images.astype(np.float32) / 127.5 - 1.
def setup_mnist(self, img_res):
print ("Setting up MNIST...")
if not os.path.exists('datasets/mnist_x.npy'):
# Load the dataset
(mnist_X, mnist_y), (_, _) = mnist.load_data()
# Normalize and rescale images
mnist_X = self.normalize(mnist_X)
mnist_X = np.array([imresize(x, img_res) for x in mnist_X])
mnist_X = np.expand_dims(mnist_X, axis=-1)
mnist_X = np.repeat(mnist_X, 3, axis=-1)
self.mnist_X, self.mnist_y = mnist_X, mnist_y
# Save formatted images
np.save('datasets/mnist_x.npy', self.mnist_X)
np.save('datasets/mnist_y.npy', self.mnist_y)
else:
self.mnist_X = np.load('datasets/mnist_x.npy')
self.mnist_y = np.load('datasets/mnist_y.npy')
print ("+ Done.")
def setup_mnistm(self, img_res):
print ("Setting up MNIST-M...")
if not os.path.exists('datasets/mnistm_x.npy'):
# Download the MNIST-M pkl file
filepath = 'datasets/keras_mnistm.pkl.gz'
if not os.path.exists(filepath.replace('.gz', '')):
print('+ Downloading ' + self.mnistm_url)
data = urllib.request.urlopen(self.mnistm_url)
with open(filepath, 'wb') as f:
f.write(data.read())
with open(filepath.replace('.gz', ''), 'wb') as out_f, \
gzip.GzipFile(filepath) as zip_f:
out_f.write(zip_f.read())
os.unlink(filepath)
# load MNIST-M images from pkl file
with open('datasets/keras_mnistm.pkl', "rb") as f:
data = pickle.load(f, encoding='bytes')
# Normalize and rescale images
mnistm_X = np.array(data[b'train'])
mnistm_X = self.normalize(mnistm_X)
mnistm_X = np.array([imresize(x, img_res) for x in mnistm_X])
self.mnistm_X, self.mnistm_y = mnistm_X, self.mnist_y.copy()
# Save formatted images
np.save('datasets/mnistm_x.npy', self.mnistm_X)
np.save('datasets/mnistm_y.npy', self.mnistm_y)
else:
self.mnistm_X = np.load('datasets/mnistm_x.npy')
self.mnistm_y = np.load('datasets/mnistm_y.npy')
print ("+ Done.")
def load_data(self, domain, batch_size=1):
X = self.mnist_X if domain == 'A' else self.mnistm_X
y = self.mnist_y if domain == 'A' else self.mnistm_y
idx = np.random.choice(list(range(len(X))), size=batch_size)
return X[idx], y[idx]
| mit | -2,877,848,713,173,794,000 | 33.305263 | 115 | 0.562136 | false |
ActiveState/code | recipes/Python/498106_Convert_formulstring_implied_multiplicatiproper_/recipe-498106.py | 1 | 1913 | def toProperFormula(s):
"""
Given formula string, returns a modified formula with missing
multiplication symbols and grouping symbols [],{} replaced by parentheses.
Only primitive error checking for mismatched grouping symbols is shown in
this recipe.
author: [email protected], [email protected]
"""
import tokenize
from cStringIO import StringIO
f = StringIO(s)
# Make variables mutable to child function.
formula = [""]
prevtoken = [""]
prevsymbol = [""]
closers = []
def handle_token(type, token, (srow, scol), (erow, ecol), line):
token = str(token)
symbol = tokenize.tok_name[type]
if symbol == "OP":
if token == ")":
if closers.pop() != "(": raise FormulaError('Error: "' +line[:ecol] + '" unbalanced ).')
elif token == "]":
if closers.pop() != "[": raise FormulaError('Error: "' +line[:ecol] + '" unbalanced ].')
token = ")"
elif token == "}":
if closers.pop() != "{": raise FormulaError('Error: "' +line[:ecol] + '" unbalanced }.')
token = ")"
elif token in ["(", "[", "{"]:
closers.append(token)
if prevtoken[0] == ")" or prevsymbol[0] == "NUMBER":
formula[0] += "*"
token = "("
elif symbol in ["NAME", "NUMBER"]:
if prevtoken[0] == ")" or prevsymbol[0] in ["NAME", "NUMBER"]:
formula[0] += "*"
formula[0] += token
prevtoken[0] = token
prevsymbol[0] = symbol
tokenize.tokenize(f.readline, handle_token)
return formula[0]
print toProperFormula("2 ( 23.45x - 4y) [34 - 5 x] + w^[atan2(2y, 4x)] 5")
"""
2*(23.45*x-4*y)*(34-5*x)+w^(atan2(2*y,4*x))*5
"""
| mit | -3,109,650,680,688,000,000 | 32.561404 | 106 | 0.500261 | false |
mattilyra/gensim | gensim/downloader.py | 1 | 14126 | """
This module is an API for downloading, getting information and loading datasets/models.
Give information about available models/datasets:
>>> import gensim.downloader as api
>>>
>>> api.info() # return dict with info about available models/datasets
>>> api.info("text8") # return dict with info about "text8" dataset
Model example:
>>> import gensim.downloader as api
>>>
>>> model = api.load("glove-twitter-25") # load glove vectors
>>> model.most_similar("cat") # show words that similar to word 'cat'
Dataset example:
>>> import gensim.downloader as api
>>> from gensim.models import Word2Vec
>>>
>>> dataset = api.load("text8") # load dataset as iterable
>>> model = Word2Vec(dataset) # train w2v model
Also, this API available via CLI::
python -m gensim.downloader --info <dataname> # same as api.info(dataname)
python -m gensim.downloader --download <dataname> # same as api.load(dataname, return_path=True)
"""
from __future__ import absolute_import
import argparse
import os
import json
import logging
import sys
import errno
import hashlib
import math
import shutil
import tempfile
from functools import partial
if sys.version_info[0] == 2:
import urllib
from urllib2 import urlopen
else:
import urllib.request as urllib
from urllib.request import urlopen
user_dir = os.path.expanduser('~')
base_dir = os.path.join(user_dir, 'gensim-data')
logger = logging.getLogger('gensim.api')
DATA_LIST_URL = "https://raw.githubusercontent.com/RaRe-Technologies/gensim-data/master/list.json"
DOWNLOAD_BASE_URL = "https://github.com/RaRe-Technologies/gensim-data/releases/download"
def _progress(chunks_downloaded, chunk_size, total_size, part=1, total_parts=1):
"""Reporthook for :func:`urllib.urlretrieve`, code from [1]_.
Parameters
----------
chunks_downloaded : int
Number of chunks of data that have been downloaded.
chunk_size : int
Size of each chunk of data.
total_size : int
Total size of the dataset/model.
part : int, optional
        Number of the current part, used only if `total_parts` > 1.
total_parts : int, optional
Total number of parts.
References
----------
[1] https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
"""
bar_len = 50
size_downloaded = float(chunks_downloaded * chunk_size)
filled_len = int(math.floor((bar_len * size_downloaded) / total_size))
percent_downloaded = round(((size_downloaded * 100) / total_size), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
if total_parts == 1:
sys.stdout.write(
'\r[%s] %s%s %s/%sMB downloaded' % (
bar, percent_downloaded, "%",
round(size_downloaded / (1024 * 1024), 1),
round(float(total_size) / (1024 * 1024), 1))
)
sys.stdout.flush()
else:
sys.stdout.write(
'\r Part %s/%s [%s] %s%s %s/%sMB downloaded' % (
part + 1, total_parts, bar, percent_downloaded, "%",
round(size_downloaded / (1024 * 1024), 1),
round(float(total_size) / (1024 * 1024), 1))
)
sys.stdout.flush()
def _create_base_dir():
"""Create the gensim-data directory in home directory, if it has not been already created.
Raises
------
Exception
An exception is raised when read/write permissions are not available or a file named gensim-data
already exists in the home directory.
"""
if not os.path.isdir(base_dir):
try:
logger.info("Creating %s", base_dir)
os.makedirs(base_dir)
except OSError as e:
if e.errno == errno.EEXIST:
raise Exception(
"Not able to create folder gensim-data in {}. File gensim-data "
"exists in the direcory already.".format(user_dir)
)
else:
raise Exception(
"Can't create {}. Make sure you have the read/write permissions "
"to the directory or you can try creating the folder manually"
.format(base_dir)
)
def _calculate_md5_checksum(fname):
"""Calculate the checksum of the file, exactly same as md5-sum linux util.
Parameters
----------
fname : str
Path to the file.
Returns
-------
str
MD5-hash of file names as `fname`.
"""
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def info(name=None, show_only_latest=True):
"""Provide the information related to model/dataset.
Parameters
----------
name : str, optional
Name of model/dataset. If not set - shows all available data.
show_only_latest : bool, optional
If storage contains different versions for one data/model, this flag allow to hide outdated versions.
Affects only if `name` is None.
Returns
-------
dict
Detailed information about one or all models/datasets.
If name is specified, return full information about concrete dataset/model,
otherwise, return information about all available datasets/models.
Raises
------
Exception
If name that has been passed is incorrect.
Examples
--------
>>> import gensim.downloader as api
>>> api.info("text8") # retrieve information about text8 dataset
{u'checksum': u'68799af40b6bda07dfa47a32612e5364',
u'description': u'Cleaned small sample from wikipedia',
u'file_name': u'text8.gz',
u'parts': 1,
u'source': u'http://mattmahoney.net/dc/text8.zip'}
>>>
>>> api.info() # retrieve information about all available datasets and models
"""
information = json.loads(urlopen(DATA_LIST_URL).read().decode("utf-8"))
if name is not None:
corpora = information['corpora']
models = information['models']
if name in corpora:
return information['corpora'][name]
elif name in models:
return information['models'][name]
else:
raise ValueError("Incorrect model/corpus name")
if not show_only_latest:
return information
return {
"corpora": {name: data for (name, data) in information['corpora'].items() if data.get("latest", True)},
"models": {name: data for (name, data) in information['models'].items() if data.get("latest", True)}
}
def _get_checksum(name, part=None):
"""Retrieve the checksum of the model/dataset from gensim-data repository.
Parameters
----------
name : str
Dataset/model name.
part : int, optional
Number of part (for multipart data only).
Returns
-------
str
Retrieved checksum of dataset/model.
"""
information = info()
corpora = information['corpora']
models = information['models']
if part is None:
if name in corpora:
return information['corpora'][name]["checksum"]
elif name in models:
return information['models'][name]["checksum"]
else:
if name in corpora:
return information['corpora'][name]["checksum-{}".format(part)]
elif name in models:
return information['models'][name]["checksum-{}".format(part)]
def _get_parts(name):
"""Retrieve the number of parts in which dataset/model has been split.
Parameters
----------
name: str
Dataset/model name.
Returns
-------
int
Number of parts in which dataset/model has been split.
"""
information = info()
corpora = information['corpora']
models = information['models']
if name in corpora:
return information['corpora'][name]["parts"]
elif name in models:
return information['models'][name]["parts"]
def _download(name):
"""Download and extract the dataset/model.
Parameters
----------
name: str
Dataset/model name which has to be downloaded.
Raises
------
Exception
If md5sum on client and in repo are different.
"""
url_load_file = "{base}/{fname}/__init__.py".format(base=DOWNLOAD_BASE_URL, fname=name)
data_folder_dir = os.path.join(base_dir, name)
data_folder_dir_tmp = data_folder_dir + '_tmp'
tmp_dir = tempfile.mkdtemp()
init_path = os.path.join(tmp_dir, "__init__.py")
urllib.urlretrieve(url_load_file, init_path)
total_parts = _get_parts(name)
if total_parts > 1:
concatenated_folder_name = "{fname}.gz".format(fname=name)
concatenated_folder_dir = os.path.join(tmp_dir, concatenated_folder_name)
for part in range(0, total_parts):
url_data = "{base}/{fname}/{fname}.gz_0{part}".format(base=DOWNLOAD_BASE_URL, fname=name, part=part)
fname = "{f}.gz_0{p}".format(f=name, p=part)
dst_path = os.path.join(tmp_dir, fname)
urllib.urlretrieve(
url_data, dst_path,
reporthook=partial(_progress, part=part, total_parts=total_parts)
)
if _calculate_md5_checksum(dst_path) == _get_checksum(name, part):
sys.stdout.write("\n")
sys.stdout.flush()
logger.info("Part %s/%s downloaded", part + 1, total_parts)
else:
shutil.rmtree(tmp_dir)
raise Exception("Checksum comparison failed, try again")
with open(concatenated_folder_dir, 'wb') as wfp:
for part in range(0, total_parts):
part_path = os.path.join(tmp_dir, "{fname}.gz_0{part}".format(fname=name, part=part))
with open(part_path, "rb") as rfp:
shutil.copyfileobj(rfp, wfp)
os.remove(part_path)
else:
url_data = "{base}/{fname}/{fname}.gz".format(base=DOWNLOAD_BASE_URL, fname=name)
fname = "{fname}.gz".format(fname=name)
dst_path = os.path.join(tmp_dir, fname)
urllib.urlretrieve(url_data, dst_path, reporthook=_progress)
if _calculate_md5_checksum(dst_path) == _get_checksum(name):
sys.stdout.write("\n")
sys.stdout.flush()
logger.info("%s downloaded", name)
else:
shutil.rmtree(tmp_dir)
raise Exception("Checksum comparison failed, try again")
if os.path.exists(data_folder_dir_tmp):
os.remove(data_folder_dir_tmp)
shutil.move(tmp_dir, data_folder_dir_tmp)
os.rename(data_folder_dir_tmp, data_folder_dir)
def _get_filename(name):
"""Retrieve the filename of the dataset/model.
Parameters
----------
name: str
Name of dataset/model.
Returns
-------
str:
Filename of the dataset/model.
"""
information = info()
corpora = information['corpora']
models = information['models']
if name in corpora:
return information['corpora'][name]["file_name"]
elif name in models:
return information['models'][name]["file_name"]
def load(name, return_path=False):
"""Download (if needed) dataset/model and load it to memory (unless `return_path` is set).
Parameters
----------
name: str
Name of the model/dataset.
return_path: bool, optional
If True, return full path to file, otherwise, return loaded model / iterable dataset.
Returns
-------
Model
Requested model, if `name` is model and `return_path` == False.
Dataset (iterable)
Requested dataset, if `name` is dataset and `return_path` == False.
str
Path to file with dataset / model, only when `return_path` == True.
Raises
------
Exception
Raised if `name` is incorrect.
Examples
--------
Model example:
>>> import gensim.downloader as api
>>>
>>> model = api.load("glove-twitter-25") # load glove vectors
>>> model.most_similar("cat") # show words that similar to word 'cat'
Dataset example:
>>> import gensim.downloader as api
>>>
>>> wiki = api.load("wiki-en") # load extracted Wikipedia dump, around 6 Gb
>>> for article in wiki: # iterate over all wiki script
>>> ...
Download only example
>>> import gensim.downloader as api
>>>
>>> print(api.load("wiki-en", return_path=True)) # output: /home/user/gensim-data/wiki-en/wiki-en.gz
"""
_create_base_dir()
file_name = _get_filename(name)
if file_name is None:
raise ValueError("Incorrect model/corpus name")
folder_dir = os.path.join(base_dir, name)
path = os.path.join(folder_dir, file_name)
if not os.path.exists(folder_dir):
_download(name)
if return_path:
return path
else:
sys.path.insert(0, base_dir)
module = __import__(name)
return module.load_data()
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s : %(name)s : %(levelname)s : %(message)s', stream=sys.stdout, level=logging.INFO
)
parser = argparse.ArgumentParser(
description="Gensim console API",
usage="python -m gensim.api.downloader [-h] [-d data_name | -i data_name | -c]"
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-d", "--download", metavar="data_name", nargs=1,
help="To download a corpus/model : python -m gensim.downloader -d <dataname>"
)
full_information = 1
group.add_argument(
"-i", "--info", metavar="data_name", nargs='?', const=full_information,
help="To get information about a corpus/model : python -m gensim.downloader -i <dataname>"
)
args = parser.parse_args()
if args.download is not None:
data_path = load(args.download[0], return_path=True)
logger.info("Data has been installed and data path is %s", data_path)
elif args.info is not None:
output = info() if (args.info == full_information) else info(name=args.info)
print(json.dumps(output, indent=4))
| lgpl-2.1 | -6,188,187,554,405,224,000 | 30.53125 | 112 | 0.606116 | false |
rycus86/docker-pygen | tests/test_generator.py | 1 | 4910 | import pygen
from unittest_helper import BaseDockerTestCase
class GeneratorTest(BaseDockerTestCase):
app = None
def tearDown(self):
super(GeneratorTest, self).tearDown()
if hasattr(self, 'app') and self.app:
self.app.api.close()
def test_generate(self):
test_container = self.start_container(environment=['GENERATOR=pygen'])
self.app = pygen.PyGen(template="""#
{% for container in containers %}
running: {{ container.name }} ID={{ container.short_id }}
{% for key, value in container.env.items() %}
env: {{ key }}=>{{ value }}
{% endfor %}
{% endfor %}""")
content = self.app.generate()
self.assertIn('running: %s' % test_container.name, content)
self.assertIn('ID=%s' % test_container.short_id, content)
self.assertIn('env: GENERATOR=>pygen', content)
def test_generate_with_groups(self):
self.start_container(environment=['GENERATOR=pygen'],
labels={'instance': '001',
'application': 'web'})
self.start_container(environment=['GENERATOR=pygen'],
labels={'instance': '002',
'application': 'web'})
self.start_container(environment=['GENERATOR=pygen'],
labels={'instance': '003',
'application': 'db'})
self.app = pygen.PyGen(template="""#
{% for key, containers in containers|groupby('labels.application') %}
group: {{ key }}
{% for container in containers %}
instance: {{ container.labels.instance }}
{% endfor %}
{% endfor %}""")
content = self.app.generate()
self.assertIn('group: web', content)
self.assertIn('group: db', content)
for num in range(1, 4):
self.assertIn('instance: %03d' % num, content)
def test_nginx_template(self):
self.start_container(name='pygen-test-nginx-1', labels={'virtual-host': 'test.site.com'}, ports={8080: None})
self.start_container(name='pygen-test-nginx-2', labels={'virtual-host': 'test.site.com'}, ports={8080: None})
self.start_container(name='pygen-test-nginx-3', labels={'virtual-host': 'www.site.com'}, ports={8080: None})
self.start_container(name='pygen-test-nginx-4', labels={'virtual-host': 'api.site.com',
'context-path': '/rest'}, ports={5000: None})
self.start_container(name='pygen-test-nginx-5', labels={'virtual-host': 'api.site.com',
'context-path': '/stream'}, ports={5000: None})
self.start_container(name='pygen-test-nginx-6', labels={'virtual-host': 'api.site.com',
'context-path': '/no-port-exposed'})
self.start_container(name='pygen-test-nginx-7', labels={'context-path': '/no-virtual-host'}, ports={9001: None})
self.app = pygen.PyGen(template=self.relative('templates/nginx.example'))
content = self.app.generate()
# pygen-test-nginx-1 : test.site.com/ 8080
self.assertIn('# pygen-test-nginx-1', content)
# pygen-test-nginx-2 : test.site.com/ 8080
self.assertIn('# pygen-test-nginx-2', content)
# pygen-test-nginx-3 : www.site.com/ 8080
self.assertIn('# pygen-test-nginx-3', content)
# pygen-test-nginx-4 : api.site.com/rest 5000
self.assertIn('# pygen-test-nginx-4', content)
# pygen-test-nginx-5 : api.site.com/stream 5000
self.assertIn('# pygen-test-nginx-5', content)
# pygen-test-nginx-6 : - /no-port-exposed
self.assertNotIn('pygen-test-nginx-6', content)
# pygen-test-nginx-7 : - /no-virtual-host 9001
self.assertNotIn('pygen-test-nginx-7', content)
for upstream in ('test.site.com___', 'www.site.com___', 'api.site.com___rest', 'api.site.com___stream'):
self.assertIn('upstream %s ' % upstream, content)
self.assertIn('proxy_pass http://%s;' % upstream, content)
self.assertNotIn('upstream api.site.com___ ', content)
self.assertIn('location / ', content)
self.assertIn('location /rest ', content)
self.assertIn('location /stream ', content)
for num in range(1, 6):
container = self.docker_client.containers.get('pygen-test-nginx-%d' % num)
ip_address = next(iter(container.attrs['NetworkSettings']['Networks'].values())).get('IPAddress')
port = next(iter(container.attrs['Config'].get('ExposedPorts', dict()).keys())).split('/')[0]
self.assertIn('server %s:%s;' % (ip_address, port), content)
| mit | -8,002,882,736,790,479,000 | 45.761905 | 120 | 0.556823 | false |
marcosfede/algorithms | tree/binary_tree/path_sum/path_sum/path_sum2.py | 1 | 1375 | def path_sum(root, sum):
if not root:
return []
res = []
DFS(root, sum, [], res)
return res
def DFS(root, sum, ls, res):
if not root.left and not root.right and root.val == sum:
ls.append(root.val)
res.append(ls)
if root.left:
DFS(root.left, sum - root.val, ls + [root.val], res)
if root.right:
DFS(root.right, sum - root.val, ls + [root.val], res)
# DFS with stack
def path_sum2(root, s):
if not root:
return []
res = []
stack = [(root, [root.val])]
while stack:
node, ls = stack.pop()
if not node.left and not node.right and sum(ls) == s:
res.append(ls)
if node.left:
stack.append((node.left, ls + [node.left.val]))
if node.right:
stack.append((node.right, ls + [node.right.val]))
return res
# BFS with queue
def path_sum3(root, sum):
if not root:
return []
res = []
queue = [(root, root.val, [root.val])]
while queue:
node, val, ls = queue.pop(0) # popleft
if not node.left and not node.right and val == sum:
res.append(ls)
if node.left:
queue.append((node.left, val + node.left.val, ls + [node.left.val]))
if node.right:
queue.append((node.right, val + node.right.val, ls + [node.right.val]))
return res
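# Minimal usage sketch (illustrative addition): the functions above assume a
# binary-tree node exposing `val`, `left` and `right`, but no node class is
# defined in this file, so a throwaway TreeNode is declared here for the demo.
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
if __name__ == '__main__':
    # Tree:        5
    #            /   \
    #           4     8
    #          /     / \
    #        11    13   4
    #        / \       / \
    #       7   2     5   1
    root = TreeNode(5,
                    TreeNode(4, TreeNode(11, TreeNode(7), TreeNode(2))),
                    TreeNode(8, TreeNode(13), TreeNode(4, TreeNode(5), TreeNode(1))))
    # Every variant reports the root-to-leaf paths whose values sum to 22.
    print(path_sum(root, 22))   # [[5, 4, 11, 2], [5, 8, 4, 5]]
    print(path_sum2(root, 22))
    print(path_sum3(root, 22))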
| gpl-3.0 | -5,353,832,707,150,959,000 | 26.5 | 83 | 0.537455 | false |
alerta/python-alerta | alertaclient/commands/cmd_heartbeats.py | 1 | 5322 | import json
import click
from tabulate import tabulate
from alertaclient.models.heartbeat import Heartbeat
from alertaclient.utils import origin
@click.command('heartbeats', short_help='List heartbeats')
@click.option('--alert', is_flag=True, help='Alert on stale or slow heartbeats')
@click.option('--severity', '-s', metavar='SEVERITY', default='major', help='Severity for heartbeat alerts')
@click.option('--timeout', metavar='SECONDS', type=int, help='Seconds before stale heartbeat alerts will be expired')
@click.option('--purge', is_flag=True, help='Delete all stale heartbeats')
@click.pass_obj
def cli(obj, alert, severity, timeout, purge):
"""List heartbeats."""
client = obj['client']
try:
default_normal_severity = obj['alarm_model']['defaults']['normal_severity']
except KeyError:
default_normal_severity = 'normal'
if severity in ['normal', 'ok', 'cleared']:
raise click.UsageError('Must be a non-normal severity. "{}" is one of {}'.format(
severity, ', '.join(['normal', 'ok', 'cleared']))
)
if severity not in obj['alarm_model']['severity'].keys():
raise click.UsageError('Must be a valid severity. "{}" is not one of {}'.format(
severity, ', '.join(obj['alarm_model']['severity'].keys()))
)
if obj['output'] == 'json':
r = client.http.get('/heartbeats')
heartbeats = [Heartbeat.parse(hb) for hb in r['heartbeats']]
click.echo(json.dumps(r['heartbeats'], sort_keys=True, indent=4, ensure_ascii=False))
else:
timezone = obj['timezone']
headers = {
'id': 'ID', 'origin': 'ORIGIN', 'customer': 'CUSTOMER', 'tags': 'TAGS', 'attributes': 'ATTRIBUTES',
'createTime': 'CREATED', 'receiveTime': 'RECEIVED', 'since': 'SINCE', 'timeout': 'TIMEOUT',
'latency': 'LATENCY', 'maxLatency': 'MAX LATENCY', 'status': 'STATUS'
}
heartbeats = client.get_heartbeats()
click.echo(tabulate([h.tabular(timezone) for h in heartbeats], headers=headers, tablefmt=obj['output']))
not_ok = [hb for hb in heartbeats if hb.status != 'ok']
if purge:
with click.progressbar(not_ok, label='Purging {} heartbeats'.format(len(not_ok))) as bar:
for b in bar:
client.delete_heartbeat(b.id)
if alert:
with click.progressbar(heartbeats, label='Alerting {} heartbeats'.format(len(heartbeats))) as bar:
for b in bar:
want_environment = b.attributes.pop('environment', 'Production')
want_severity = b.attributes.pop('severity', severity)
want_service = b.attributes.pop('service', ['Alerta'])
want_group = b.attributes.pop('group', 'System')
if b.status == 'expired': # aka. "stale"
client.send_alert(
resource=b.origin,
event='HeartbeatFail',
environment=want_environment,
severity=want_severity,
correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'],
service=want_service,
group=want_group,
value='{}'.format(b.since),
text='Heartbeat not received in {} seconds'.format(b.timeout),
tags=b.tags,
attributes=b.attributes,
origin=origin(),
type='heartbeatAlert',
timeout=timeout,
customer=b.customer
)
elif b.status == 'slow':
client.send_alert(
resource=b.origin,
event='HeartbeatSlow',
environment=want_environment,
severity=want_severity,
correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'],
service=want_service,
group=want_group,
value='{}ms'.format(b.latency),
text='Heartbeat took more than {}ms to be processed'.format(b.max_latency),
tags=b.tags,
attributes=b.attributes,
origin=origin(),
type='heartbeatAlert',
timeout=timeout,
customer=b.customer
)
else:
client.send_alert(
resource=b.origin,
event='HeartbeatOK',
environment=want_environment,
severity=default_normal_severity,
correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'],
service=want_service,
group=want_group,
value='',
text='Heartbeat OK',
tags=b.tags,
attributes=b.attributes,
origin=origin(),
type='heartbeatAlert',
timeout=timeout,
customer=b.customer
)
| mit | -4,965,624,064,348,664,000 | 44.487179 | 117 | 0.511086 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/en_debrid/rlsbb.py | 1 | 7811 | # -*- coding: utf-8 -*-
import re,urllib,urlparse
import traceback
from resources.lib.modules import log_utils, source_utils
from resources.lib.modules import client, rd_check
from resources.lib.modules import debrid, control
from resources.lib.sources import cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['rlsbb.ru']
self.base_link = 'http://rlsbb.ru'
self.search_base_link = 'http://search.rlsbb.ru'
self.search_cookie = 'serach_mode=rlsbb'
self.search_link = '/lib/search526049.php?phrase=%s&pindex=1&content=true'
self.headers = {'User-Agent': client.agent()}
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
if debrid.status() is False: return
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
if debrid.status() is False: return
try:
if url is None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None:
return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
premDate = ''
query = '%s %s S%02dE%02d' % (
data['tvshowtitle'], data['year'], int(data['season']), int(data['episode']))
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
query = query.replace("&", "and")
query = query.replace(" ", " ")
query = query.replace(" ", "-")
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
url = "http://rlsbb.ru/" + query
if 'tvshowtitle' not in data: url = url + "-1080p"
r = cfscrape.get(url, headers=self.headers).content
if r is None and 'tvshowtitle' in data:
season = re.search('S(.*?)E', hdlr)
season = season.group(1)
query = title
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
query = query + "-S" + season
query = query.replace("&", "and")
query = query.replace(" ", " ")
query = query.replace(" ", "-")
url = "http://rlsbb.ru/" + query
r = cfscrape.get(url, headers=self.headers).content
for loopCount in range(0, 2):
if loopCount == 1 or (r is None and 'tvshowtitle' in data):
premDate = re.sub('[ \.]','-',data['premiered'])
query = re.sub('[\\\\:;*?"<>|/\-\']', '', data['tvshowtitle'])
query = query.replace("&", " and ").replace(" ", " ").replace(" ", "-")
query = query + "-" + premDate
url = "http://rlsbb.ru/" + query
url = url.replace('The-Late-Show-with-Stephen-Colbert','Stephen-Colbert')
r = cfscrape.get(url, headers=self.headers).content
posts = client.parseDOM(r, "div", attrs={"class": "content"})
hostDict = hostprDict + hostDict
if control.setting('deb.rd_check') == 'true':
limit = 25
items = []
for index, post in enumerate(posts):
if index == limit:
break
try:
u = client.parseDOM(post, 'a', ret='href')
for i in u:
try:
name = str(i)
if hdlr in name.upper():
items.append(name)
elif len(premDate) > 0 and premDate in name.replace(".", "-"):
items.append(name)
except:
pass
except:
pass
if len(items) > 0:
break
else:
items = []
for post in posts:
try:
u = client.parseDOM(post, 'a', ret='href')
for i in u:
try:
name = str(i)
if hdlr in name.upper():
items.append(name)
elif len(premDate) > 0 and premDate in name.replace(".", "-"):
items.append(name)
except:
pass
except:
pass
if len(items) > 0:
break
seen_urls = set()
for item in items:
try:
info = []
url = str(item)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
if url in seen_urls:
continue
seen_urls.add(url)
host = url.replace("\\", "")
host2 = host.strip('"')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(host2.strip().lower()).netloc)[0]
if host not in hostDict:
raise Exception()
if any(x in host2 for x in ['.rar', '.zip', '.iso']):
continue
quality, info = source_utils.get_release_quality(host2, host2)
info = ' | '.join(info)
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
if control.setting('deb.rd_check') == 'true':
check = rd_check.rd_deb_check(host2)
if check:
info = 'RD Checked' + ' | ' + info
sources.append(
{'source': host, 'quality': quality, 'language': 'en', 'url': check,
'info': info, 'direct': False, 'debridonly': True})
else:
sources.append(
{'source': host, 'quality': quality, 'language': 'en', 'url': host2,
'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check:
sources = check
return sources
except Exception:
failure = traceback.format_exc()
log_utils.log('---Rlsbb Testing - Exception: \n' + str(failure))
return sources
def resolve(self, url):
return url
| gpl-2.0 | -5,736,061,815,632,537,000 | 38.852041 | 109 | 0.416592 | false |
flavour/eden | modules/templates/historic/DRRPP/controllers.py | 12 | 31768 | # -*- coding: utf-8 -*-
import json
from os import path
from gluon import *
from gluon.storage import Storage
from s3 import *
THEME = "historic.DRRPP"
# =============================================================================
class index():
""" Custom Home Page """
def __call__(self):
request = current.request
response = current.response
view = path.join(request.folder, "modules", "templates",
THEME, "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
# Show full width instead of login box if user is logged in
if current.auth.is_logged_in():
grid = "grid_12"
else:
grid = "grid_8"
latest_projects = DIV(_id="front-latest-body",
_class="%s alpha" % grid)
lappend = latest_projects.append
db = current.db
s3db = current.s3db
table = s3db.project_project
table_drrpp = s3db.project_drrpp
query = (table.deleted != True) & \
(table.approved_by != None)
rows = db(query).select(table.id,
table.name,
table_drrpp.activities,
table.organisation_id,
table.start_date,
left=table_drrpp.on(table.id == table_drrpp.project_id),
limitby=(0, 3))
project_ids = [r.project_project.id for r in rows]
ltable = s3db.project_location
gtable = s3db.gis_location
query = (ltable.deleted != True) & \
(ltable.project_id == table.id) & \
(gtable.id == ltable.location_id) & \
(gtable.level == "L0")
locations = db(query).select(ltable.project_id,
gtable.L0)
odd = True
for row in rows:
countries = [l.gis_location.L0 for l in locations if l.project_location.project_id == row.project_project.id]
location = ", ".join(countries)
if odd:
_class = "front-latest-item odd %s alpha" % grid
else:
_class = "front-latest-item even %s alpha" % grid
card = DIV(DIV(A(row.project_project.name,
_href=URL(c="project", f="project", args=[row.project_project.id])),
_class="front-latest-title %s" % grid,
),
DIV("Lead Organization: %s" % s3db.org_organisation_represent(row.project_project.organisation_id),
_class="front-latest-desc %s" % grid,
),
DIV(SPAN("Start Date: %s" % row.project_project.start_date,
_class="front-latest-info-date"),
SPAN("Countries: %s" % location,
_class="front-latest-info-location"),
_class="front-latest-info %s" % grid,
),
DIV(row.project_drrpp.activities or "",
_class="front-latest-desc %s" % grid,
),
_class=_class,
)
lappend(card)
odd = False if odd else True
login = current.auth.login(inline=True)
appname = request.application
s3 = response.s3
if current.session.s3.debug:
s3.scripts.append("/%s/static/themes/DRRPP/js/slides.jquery.js" % appname)
else:
s3.scripts.append("/%s/static/themes/DRRPP/js/slides.min.jquery.js" % appname)
s3.jquery_ready.append('''
$('#slides').slides({
play:8000,
animationStart:function(current){
$('.caption').animate({
bottom:-35
},100);
},
animationComplete:function(current){
$('.caption').animate({
bottom:0
},200);
},
slidesLoaded:function() {
$('.caption').animate({
bottom:0
},200);
}
})''')
return dict(title = "Home",
form = login,
latest_projects = latest_projects,
)
# =============================================================================
class register():
""" Custom Registration Page """
def __call__(self):
request = current.request
response = current.response
view = path.join(request.folder, "modules", "templates",
THEME, "views", "register.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
T = current.T
auth = current.auth
_settings = auth.settings
# Default the profile language to the one currently active
table = _settings.table_user
table.language.default = T.accepted_language
# Combo box for Organisation
table.organisation_id.widget = S3OrganisationAutocompleteWidget()
table.organisation_id.requires = IS_COMBO_BOX("org_organisation",
current.s3db.org_organisation_id.attr.requires)
# Custom onaccept to process custom fields
_settings.register_onaccept = register_onaccept
# Build the registration form
form = auth.register(js_validation=False)
# Set the formstyle
# @ToDo: Update to the fact that Auth now uses formstyle & use s3_addrow to add new rows
_form = form[0]
_form[-1] = TR(TD(_class="w2p_fl"),
TD(_class="w2p_fc"),
TD(INPUT(_type="submit",
_value=T("Register")),
_class="w2p_fw"),
_id="submit_record_row"
)
_form[0] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("First Name")),
_id="auth_user_first_name__label",
_for="auth_user_first_name"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_first_name",
_class="string",
_type="text",
_name="first_name",
_size="62"),
_class="w2p_fw"),
_id="auth_user_first_name_row"
)
_form[1] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Last Name")),
_id="auth_user_last_name__label",
_for="auth_user_last_name"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_last_name",
_class="string",
_type="text",
_name="last_name",
_size="62"),
_class="w2p_fw"),
_id="auth_user_last_name_row"
)
_form[2] = TR(TD(_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Organization")),
_id="auth_user_organisation_id__label",
_for="auth_user_organisation_id"),
_class="w2p_fc"),
TD(form.custom.widget.organisation_id,
_class="w2p_fw"),
_id="auth_user_organisation_id_row"
)
_form[3] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("E-Mail")),
_id="auth_user_email__label",
_for="auth_user_email"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_email",
_class="string",
_type="text",
_name="email",
_size="62"),
_class="w2p_fw"),
_id="auth_user_email_row"
)
_form[4] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Password")),
_id="auth_user_password__label",
_for="auth_user_password"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_password",
_type="password",
_name="password",
_class="password",
),
_class="w2p_fw"),
_id="auth_user_password_row"
)
_form[5] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Verify Password")),
_id="auth_user_password_two__label",
_for="auth_user_password_two"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_password_two",
_type="password",
_name="password_two",
_class="password",
),
_class="w2p_fw"),
_id="auth_user_password_two_row"
)
# Add custom fields
append = _form[2].append
append(
TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Role")),
_id="auth_user_position__label",
_for="auth_user_position"),
_class="w2p_fc"),
TD(SELECT(OPTION(_value=""),
OPTION(T("Practitioner"),
_value="1"),
OPTION(T("Consultant"),
_value="2"),
OPTION(T("Researcher"),
_value="3"),
OPTION(T("Academic"),
_value="4"),
OPTION(T("Student"),
_value="5"),
_name="position",
_id="auth_user_position",
_class="integer"
),
_class="w2p_fw"),
_id="auth_user_position_row"
)
)
append(
TR(TD(SPAN(" *", _class="req"),
DIV(_rel="If you do not specify an organisation, please enter your reason for using the DRR Project Portal.",
_class="labeltip"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Reason")),
_id="auth_user_reason__label",
_for="auth_user_reason"),
_class="w2p_fc"),
TD(TEXTAREA(_id="auth_user_reason",
_class="text",
_name="reason",
_rows="10",
_cols="50",
),
_class="w2p_fw"),
_id="auth_user_reason_row"
)
)
# Add client-side validation
s3 = response.s3
appname = request.application
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.js" % appname)
s3.scripts.append("/%s/static/scripts/jquery.validate.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.min.js" % appname)
s3.scripts.append("/%s/static/scripts/jquery.validate.min.js" % appname)
# @ToDo: s3_unicode if site being used with i18n
s3.jquery_ready.append("".join(('''
$('.auth_register').validate({
errorClass:'req',
rules:{
first_name:{
required:true
},
last_name:{
required:true
},
position:{
required:true,
},
reason:{
required:true,
},
email:{
required:true,
email:true
},
password:{
required:true
},
password_two:{
required:true,
equalTo:'.password:first'
}
},
messages:{
first_name:"''', str(T("Enter your first name")), '''",
last_name:"''', str(T("Enter your last name")), '''",
position:"''', str(T("Select your role")), '''",
reason:"''', str(T("Enter a reason")), '''",
password:{
required:"''', str(T("Provide a password")), '''"
},
password_two:{
required:"''', str(T("Repeat your password")), '''",
equalTo:"''', str(T("Enter the same password as above")), '''"
},
email:{
required:"''', str(T("Please enter a valid email address")), '''",
email:"''', str(T("Please enter a valid email address")), '''"
}
},
errorPlacement:function(error,element){
error.appendTo(element.parent())
},
submitHandler:function(form){
form.submit()
}
})
$('.password:first').pstrength({minchar:''', str(_settings.password_min_length), ''',minchar_label:"''', str(T("The minimum number of characters is ")), '''"})
$('.labeltip').cluetip({activation:'hover',position:'mouse',sticky:false,showTitle:false,local:true})''')))
response.title = T("DRRPP - Register")
return dict(form=form)
# -----------------------------------------------------------------------------
def register_onaccept(form):
""" Tasks to be performed after a new user registers """
# Process Custom Fields
req_vars = form.request_vars
position = req_vars.get("position", "")
reason = req_vars.get("reason", "")
db = current.db
table = db.auth_user
db(table.id == form.vars.id).update(comments = "%s | %s" % (position, reason))
# =============================================================================
class contact():
""" Contact Form """
def __call__(self):
request = current.request
response = current.response
view = path.join(request.folder, "modules", "templates",
THEME, "views", "contact.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
if request.env.request_method == "POST":
# Processs Form
vars = request.post_vars
result = current.msg.send_email(
#to=current.deployment_settings.get_mail_approver(),
to="[email protected]",
subject=vars.subject,
message=vars.message,
reply_to=vars.address,
)
if result:
                response.confirmation = "Thank you for your message - we'll be in touch shortly"
#T = current.T
# form = FORM(TABLE(
# TR(LABEL("Your name:",
# SPAN(" *", _class="req"),
# _for="name")),
# TR(INPUT(_name="name", _type="text", _size=62, _maxlength="255")),
# TR(LABEL("Your e-mail address:",
# SPAN(" *", _class="req"),
# _for="address")),
# TR(INPUT(_name="address", _type="text", _size=62, _maxlength="255")),
# TR(LABEL("Subject:",
# SPAN(" *", _class="req"),
# _for="subject")),
# TR(INPUT(_name="subject", _type="text", _size=62, _maxlength="255")),
# TR(LABEL("Message:",
# SPAN(" *", _class="req"),
# _for="name")),
# TR(TEXTAREA(_name="message", _class="resizable", _rows=5, _cols=62)),
# TR(INPUT(_type="submit", _value="Send e-mail")),
# ),
# _id="contact-form"
# )
s3 = response.s3
if s3.cdn:
if s3.debug:
s3.scripts.append("http://ajax.aspnetcdn.com/ajax/jquery.validate/1.9/jquery.validate.js")
else:
s3.scripts.append("http://ajax.aspnetcdn.com/ajax/jquery.validate/1.9/jquery.validate.min.js")
else:
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.validate.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/jquery.validate.min.js" % request.application)
s3.jquery_ready.append(
'''$('#contact-form').validate({
errorClass:'req',
rules:{
name:{
required:true
},
subject:{
required:true
},
message:{
required:true
},
name:{
required:true
},
address: {
required:true,
email:true
}
},
messages:{
name:"Enter your name",
subject:"Enter a subject",
message:"Enter a message",
address:{
required:"Please enter a valid email address",
email:"Please enter a valid email address"
}
},
errorPlacement:function(error,element){
error.appendTo(element.parents('tr').prev().children())
},
submitHandler:function(form){
form.submit()
}
})''')
response.title = "Contact | DRR Project Portal"
return dict(
#form=form
)
# =============================================================================
class about():
"""
Custom About page
"""
def __call__(self):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "modules", "templates",
THEME, "views", "about.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("About")
return dict(
title=T("About"),
)
# =============================================================================
class admin():
"""
Custom Admin Index Page
"""
def __call__(self):
auth = current.auth
s3_has_role = auth.s3_has_role
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
if s3_has_role(ADMIN) | s3_has_role(ORG_ADMIN):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "modules", "templates",
THEME, "views", "admin.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("Administration Panel")
panel_list = [A(T("Verify Users"),
_href = URL(c="admin", f = "user")
),
A(T("User List (Excel)"),
_href = URL(c="admin", f = "user.xls")
),
A(T("Manage Administrators"),
_href = URL(c="admin", f = "role", args = [1,"users"])
),
A(T("Manage Organization Contacts"),
_href = URL(c="admin", f = "role", args = [6,"users"])
),
A(T("Manage Organizations"),
_href = URL(c="org", f = "organisation")
),
A(T("Approve Projects"),
_href = URL(c="project", f = "project", args = "review")
),
A(T("Approve Frameworks"),
_href = URL(c="project", f = "framework", args = "review")
),
A(T("Approve Organisations"),
_href = URL(c="org", f = "organisation", args = "review")
),
A(T("Edit Countries and Administrative Areas"),
_href = URL(c="gis", f = "location")
),
A(T("Edit Hazards"),
_href = URL(c="project", f = "hazard")
),
A(T("Edit Themes"),
_href = URL(c="project", f = "theme")
),
]
return dict(item = UL(*panel_list,
_id = "admin_panel_list") )
else:
redirect(URL(c="default", f="index"))
# =============================================================================
class analysis():
"""
Custom page for Project Analysis
"""
def __call__(self):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "modules", "templates",
THEME, "views", "analysis.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("Project Analysis")
return dict(
title=T("Project Analysis"),
)
# =============================================================================
class get_started():
"""
Custom page
"""
def __call__(self):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "modules", "templates",
THEME, "views", "get_started.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("Get Started")
return dict(
)
# =============================================================================
class login():
"""
Custom Login page
"""
def __call__(self):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "modules", "templates",
THEME, "views", "login.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("Login")
return dict(
form = current.auth.login()
)
# =============================================================================
class mypage():
"""
Custom page for a User to manage their Saved Search & Subscriptions
@todo: SavedSearch deprecated,
re-implement with saved filters / S3Notify
"""
def __call__(self):
auth = current.auth
#if not auth.is_logged_in():
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "modules", "templates",
THEME, "views", "mypage.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("My Page")
return dict(
title=T("My Page"),
)
#else:
# person_id = auth.s3_logged_in_person()
# redirect(URL(c="pr", f="person", args=[person_id, "saved_search"]))
# =============================================================================
class organisations():
"""
Custom page to show 2 dataTables on a single page:
* Regional Organisations
* Committees, Forums, Mechanism, Meetings and Networks
"""
def __call__(self):
#T = current.T
request = current.request
response = current.response
response.title = "DRR Projects Portal - Regional Organizations"
view = path.join(request.folder, "modules", "templates",
THEME, "views", "organisations.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
s3 = response.s3
s3["dataTable_dom"] = 'ripl<"dataTable_table"t>p'
tables = []
table = request.vars.get("table", None)
if table is None:
# HTML call
if s3.debug:
append = s3.scripts.append
appname = request.application
append("/%s/static/scripts/jquery.dataTables.js" % appname)
append("/%s/static/scripts/jquery.dataTables.fnSetFilteringDelay.js" % appname)
append("/%s/static/scripts/jquery.dataTables.sortFunctions.js" % appname)
append("/%s/static/scripts/S3/s3.dataTables.multi.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.dataTables.multi.min.js" % request.application)
s3.js_global.append('''S3.dataTablesInstances=[]''')
s3request, list_fields = self._regional()
tables.append(self._table("regional", s3request.resource, list_fields))
s3request, list_fields = self._groups()
tables.append(self._table("groups", s3request.resource, list_fields))
else:
# AJAX call
if table == "groups":
s3request, list_fields = self._groups()
elif table == "regional":
s3request, list_fields = self._regional()
current.s3db.configure(s3request.resource.tablename,
list_fields = list_fields)
return s3request()
return dict(tables=tables)
# -------------------------------------------------------------------------
@staticmethod
def _regional():
"""
Regional Organisations
- Filtered subset of Organisations
"""
T = current.T
s3request = s3_request("org", "organisation", extension="aadata")
# (FS("project.id") != None) & \
f = (FS("organisation_type.name").anyof(["Regional Organisation",
"Regional Office",
"Regional Center"]))
s3request.resource.add_filter(f)
list_fields = ["id",
"name",
"acronym",
(T("Type"), "organisation_organisation_type.organisation_type_id"),
"website",
"region_id",
"year",
(T("Notes"), "comments"),
]
return (s3request, list_fields)
# -------------------------------------------------------------------------
@staticmethod
def _groups():
"""
Committees/Mechanisms/Forums & Networks
- Filtered subset of Organisations
"""
T = current.T
s3db = current.s3db
table = s3db.org_organisation
table.address = Field.Method("address",
s3db.org_organisation_address)
s3request = s3_request("org", "organisation", extension="aadata")
#(FS("project.id") != None) & \
f = (FS("organisation_type.name").anyof(["Committees/Mechanism/Forum",
"Network"]))
s3request.resource.add_filter(f)
list_fields = ["id",
"name",
"acronym",
(T("Type"), "organisation_organisation_type.organisation_type_id"),
"year",
(T("Address"), "address"),
(T("Notes"), "comments"),
]
return (s3request, list_fields)
# -------------------------------------------------------------------------
@staticmethod
def _table(name, resource, field_list, limit=10, orderby="name"):
""" Generate a datatable in the organisations custom page """
data = resource.select(field_list,
limit=None,
orderby=orderby,
count=True,
represent=True)
rfields = data["rfields"]
records = data["rows"]
numrows = len(records)
rows = []
cols = []
for rfield in rfields:
colname = rfield.colname
cols.append({"name": colname, "label": rfield.label})
for i in xrange(numrows):
if len(rows) == i:
rows.append([])
rows[i].append(records[i][colname])
options = json.dumps({
#"ajax": "/%s/default/index/organisations/?table=%s" % (current.request.application, name),
"deferLoading": data["numrows"],
"columnDefs": [{"targets": [0]}],
"columns": [{"name": col["name"],
"visible": False,
} for col in cols],
"dom": 'rifpl<"dataTable_table"t>p',
"pageLength": limit,
"processing": True,
#"serverSide": True,
})
script = '''S3.dataTablesInstances.push({'options':%s})''' % XML(options)
current.response.s3.js_global.append(script)
table = Storage(cols=cols,
rows=rows,
#options=options,
)
return table
# END =========================================================================
| mit | 958,735,573,453,550,800 | 35.726012 | 159 | 0.435784 | false |
willrogers/pml | setup.py | 1 | 3066 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pytac',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.3',
description='Python Middlelayer is a Python library intended to make it easy to work with particle accelerators.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/willrogers/pml',
# Author details
author='Will Rogers, Razvan Vasile',
author_email='[email protected], [email protected]',
# Choose your license
license='Apache License 2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='accelerator physics development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=find_packages['pytac', 'test']),
packages=['pytac'],
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
)
| apache-2.0 | -5,357,689,889,863,483,000 | 33.066667 | 118 | 0.667319 | false |
Dubrzr/golb | users/models.py | 1 | 1432 | from django.contrib.auth.base_user import BaseUserManager, AbstractBaseUser
from django.db import models
class UserManager(BaseUserManager):
def create_user(self, **user_data):
user = self.model()
print(dict(user_data))
user.set_password(user_data.pop('password'))
for key, value in user_data.items():
setattr(user, key, value)
user.save(using=self.db)
return user
def create_superuser(self, **user_data):
user = self.create_user(**user_data)
user.is_admin = True
user.is_superuser = True
user.is_staff = True
user.is_active = True
user.save(using=self.db)
return user
class User(AbstractBaseUser):
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['first_name', 'last_name', 'email', 'is_admin']
first_name = models.CharField(max_length=254)
last_name = models.CharField(max_length=254)
username = models.CharField(max_length=254, unique=True)
email = models.EmailField(max_length=254, unique=True)
date_joined = models.DateTimeField(auto_now=True)
is_admin = models.BooleanField(default=False)
def save(self, *args, **kwargs):
if not self.pk:
self.is_superuser = self.is_admin
self.is_staff = self.is_admin
super().save(*args, **kwargs)
else:
super().save(*args, **kwargs)
| mit | 1,415,178,026,668,730,400 | 31.545455 | 75 | 0.627793 | false |
lino-framework/xl | lino_xl/lib/products/fixtures/furniture.py | 1 | 4427 | # -*- coding: UTF-8 -*-
# Copyright 2009-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.utils.instantiator import Instantiator
from lino_xl.lib.products.choicelists import ProductTypes
from lino.api import dd, _
def objects():
productcat = Instantiator('products.Category').build
product = Instantiator('products.Product', "sales_price category").build
furniture = productcat(
id=1, product_type=ProductTypes.default, **dd.babel_values(
'name', _("Furniture"), et="Mööbel", de="Möbel", fr="Meubles"))
yield furniture
# print "foo", furniture.id, furniture
hosting = productcat(
id=2, product_type=ProductTypes.default, **dd.babel_values(
'name', _("Website Hosting"),
et="Veebimajutus",
de="Website-Hosting",
fr="Hébergement de sites Internet"))
yield hosting
other = productcat(id=3, **dd.str2kw('name', _("Other")))
yield other
kw = dd.babel_values('name', _("Wooden table"),
et=u"Laud puidust",
de="Tisch aus Holz",
fr=u"Table en bois")
kw.update(dd.babel_values(
'description', _("""\
This table is made of pure wood.
It has **four legs**.
Designed to fit perfectly with **up to 6 wooden chairs**.
Product of the year 2008."""),
et="""\
See laud on tehtud ehtsast puust.
Sellel on **neli jalga**.
Disainitud sobida kokku **kuni 6 puidust tooliga**.
Product of the year 2008.""",
de="""\
Dieser Tisch ist aus echtem Holz.
Er hat **vier Beine**.
Passt perfekt zusammen mit **bis zu 6 Stühlen aus Holz**.
Produkt des Jahres 2008.""",
fr="""\
Cette table est en bois authentique.
Elle a **quatre jambes**.
Conçue pour mettre jusqu'à **6 chaises en bois**.
Produit de l'année 2008.""",
))
yield product("199.99", 1, **kw)
yield product("99.99", 1, **dd.babel_values('name', _("Wooden chair"),
et="Tool puidust",
de="Stuhl aus Holz",
fr="Chaise en bois"))
yield product("129.99", 1, **dd.babel_values('name', _("Metal table"),
et="Laud metallist",
de="Tisch aus Metall",
fr="Table en métal"))
yield product("79.99", 1, **dd.babel_values('name', _("Metal chair"),
et="Tool metallist",
de="Stuhl aus Metall",
fr="Chaise en métal"))
hosting = product("3.99", 2,
**dd.babel_values('name', _("Website hosting 1MB/month"),
et="Majutus 1MB/s",
de="Website-Hosting 1MB/Monat",
fr="Hébergement 1MB/mois"))
yield hosting
yield product("30.00", 2,
**dd.babel_values('name', _("IT consultation & maintenance"),
et=u"IKT konsultatsioonid & hooldustööd",
de=u"EDV Konsultierung & Unterhaltsarbeiten",
fr=u"ICT Consultation & maintenance"))
yield product("35.00", 2, **dd.babel_values(
'name', _("Server software installation, configuration and administration"),
et="Serveritarkvara installeerimine, seadistamine ja administreerimine",
de="Server software installation, configuration and administration",
fr="Server software installation, configuration and administration"))
yield product("40.00", 2, **dd.babel_values(
'name', _("Programming"),
et="Programmeerimistööd",
de="Programmierung",
fr="Programmation"))
yield product("25.00", 2, **dd.babel_values(
'name', _("Image processing and website content maintenance"),
et="Pilditöötlus ja kodulehtede sisuhaldustööd",
de="Bildbearbeitung und Unterhalt Website",
fr="Traitement d'images et maintenance site existant"))
yield product("29.90", 3, **dd.str2kw('name', _("Book"), vat_class="reduced"))
yield product("1.40", 3, **dd.str2kw('name', _("Stamp"), vat_class="exempt"))
| bsd-2-clause | -2,917,680,532,478,270,000 | 42.643564 | 84 | 0.54696 | false |
transt/cloud-init-0.7.5 | cloudinit/util.py | 1 | 58202 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Haefliger <[email protected]>
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# pylint: disable=C0302
from StringIO import StringIO
import contextlib
import copy as obj_copy
import ctypes
import errno
import glob
import grp
import gzip
import hashlib
import json
import os
import os.path
import platform
import pwd
import random
import re
import shutil
import socket
import stat
import string # pylint: disable=W0402
import subprocess
import sys
import tempfile
import time
import urlparse
import yaml
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import mergers
from cloudinit import safeyaml
from cloudinit import type_utils
from cloudinit import url_helper
from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
# Helps cleanup filenames to ensure they aren't FS incompatible
FN_REPLACEMENTS = {
os.sep: '_',
}
FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
# Helper utils to see if running in a container
CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
class ProcessExecutionError(IOError):
MESSAGE_TMPL = ('%(description)s\n'
'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Reason: %(reason)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r')
def __init__(self, stdout=None, stderr=None,
exit_code=None, cmd=None,
description=None, reason=None):
if not cmd:
self.cmd = '-'
else:
self.cmd = cmd
if not description:
self.description = 'Unexpected error while running command.'
else:
self.description = description
if not isinstance(exit_code, (long, int)):
self.exit_code = '-'
else:
self.exit_code = exit_code
if not stderr:
self.stderr = ''
else:
self.stderr = stderr
if not stdout:
self.stdout = ''
else:
self.stdout = stdout
if reason:
self.reason = reason
else:
self.reason = '-'
message = self.MESSAGE_TMPL % {
'description': self.description,
'cmd': self.cmd,
'exit_code': self.exit_code,
'stdout': self.stdout,
'stderr': self.stderr,
'reason': self.reason,
}
IOError.__init__(self, message)
class SeLinuxGuard(object):
def __init__(self, path, recursive=False):
# Late import since it might not always
# be possible to use this
try:
self.selinux = importer.import_module('selinux')
except ImportError:
self.selinux = None
self.path = path
self.recursive = recursive
def __enter__(self):
if self.selinux and self.selinux.is_selinux_enabled():
return True
else:
return False
def __exit__(self, excp_type, excp_value, excp_traceback):
if self.selinux and self.selinux.is_selinux_enabled():
path = os.path.realpath(os.path.expanduser(self.path))
# path should be a string, not unicode
path = str(path)
do_restore = False
try:
# See if even worth restoring??
stats = os.lstat(path)
if stat.ST_MODE in stats:
self.selinux.matchpathcon(path, stats[stat.ST_MODE])
do_restore = True
except OSError:
pass
if do_restore:
LOG.debug("Restoring selinux mode for %s (recursive=%s)",
path, self.recursive)
self.selinux.restorecon(path, recursive=self.recursive)
class MountFailedError(Exception):
pass
class DecompressionError(Exception):
pass
def ExtendedTemporaryFile(**kwargs):
fh = tempfile.NamedTemporaryFile(**kwargs)
# Replace its unlink with a quiet version
# that does not raise errors when the
# file to unlink has been unlinked elsewhere..
LOG.debug("Created temporary file %s", fh.name)
fh.unlink = del_file
# Add a new method that will unlink
# right 'now' but still lets the exit
# method attempt to remove it (which will
# not throw due to our del file being quiet
# about files that are not there)
def unlink_now():
fh.unlink(fh.name)
setattr(fh, 'unlink_now', unlink_now)
return fh
def fork_cb(child_cb, *args):
fid = os.fork()
if fid == 0:
try:
child_cb(*args)
os._exit(0) # pylint: disable=W0212
except:
logexc(LOG, "Failed forking and calling callback %s",
type_utils.obj_name(child_cb))
os._exit(1) # pylint: disable=W0212
else:
LOG.debug("Forked child %s who will run callback %s",
fid, type_utils.obj_name(child_cb))
def is_true(val, addons=None):
if isinstance(val, (bool)):
return val is True
check_set = ['true', '1', 'on', 'yes']
if addons:
check_set = check_set + addons
if str(val).lower().strip() in check_set:
return True
return False
def is_false(val, addons=None):
if isinstance(val, (bool)):
return val is False
check_set = ['off', '0', 'no', 'false']
if addons:
check_set = check_set + addons
if str(val).lower().strip() in check_set:
return True
return False
def translate_bool(val, addons=None):
if not val:
# This handles empty lists and false and
# other things that python believes are false
return False
# If its already a boolean skip
if isinstance(val, (bool)):
return val
return is_true(val, addons)
def rand_str(strlen=32, select_from=None):
if not select_from:
select_from = string.letters + string.digits
return "".join([random.choice(select_from) for _x in range(0, strlen)])
def read_conf(fname):
try:
return load_yaml(load_file(fname), default={})
except IOError as e:
if e.errno == errno.ENOENT:
return {}
else:
raise
# Merges X lists, and then keeps the
# unique ones, but orders by sort order
# instead of by the original order
def uniq_merge_sorted(*lists):
return sorted(uniq_merge(*lists))
# Merges X lists and then iterates over those
# and only keeps the unique items (order preserving)
# and returns that merged and uniqued list as the
# final result.
#
# Note: if any entry is a string it will be
# split on commas and empty entries will be
# evicted and merged in accordingly.
def uniq_merge(*lists):
combined_list = []
for a_list in lists:
if isinstance(a_list, (str, basestring)):
a_list = a_list.strip().split(",")
# Kickout the empty ones
a_list = [a for a in a_list if len(a)]
combined_list.extend(a_list)
return uniq_list(combined_list)
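# Illustrative example of uniq_merge (not from the original source): given the
# comma-splitting behaviour described above, and assuming uniq_list() keeps the
# first occurrence of each item, uniq_merge("a,b", ["b", "c"], "") would yield
# ['a', 'b', 'c'].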
def clean_filename(fn):
for (k, v) in FN_REPLACEMENTS.iteritems():
fn = fn.replace(k, v)
removals = []
for k in fn:
if k not in FN_ALLOWED:
removals.append(k)
for k in removals:
fn = fn.replace(k, '')
fn = fn.strip()
return fn
def decomp_gzip(data, quiet=True):
try:
buf = StringIO(str(data))
with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
return gh.read()
except Exception as e:
if quiet:
return data
else:
raise DecompressionError(str(e))
def extract_usergroup(ug_pair):
if not ug_pair:
return (None, None)
ug_parted = ug_pair.split(':', 1)
u = ug_parted[0].strip()
if len(ug_parted) == 2:
g = ug_parted[1].strip()
else:
g = None
if not u or u == "-1" or u.lower() == "none":
u = None
if not g or g == "-1" or g.lower() == "none":
g = None
return (u, g)
def find_modules(root_dir):
entries = dict()
for fname in glob.glob(os.path.join(root_dir, "*.py")):
if not os.path.isfile(fname):
continue
modname = os.path.basename(fname)[0:-3]
modname = modname.strip()
if modname and modname.find(".") == -1:
entries[fname] = modname
return entries
def multi_log(text, console=True, stderr=True,
log=None, log_level=logging.DEBUG):
if stderr:
sys.stderr.write(text)
if console:
conpath = "/dev/console"
if os.path.exists(conpath):
with open(conpath, 'wb') as wfh:
wfh.write(text)
wfh.flush()
else:
# A container may lack /dev/console (arguably a container bug). If
# it does not exist, then write output to stdout. this will result
# in duplicate stderr and stdout messages if stderr was True.
#
# even though upstart or systemd might have set up output to go to
# /dev/console, the user may have configured elsewhere via
# cloud-config 'output'. If there is /dev/console, messages will
# still get there.
sys.stdout.write(text)
if log:
if text[-1] == "\n":
log.log(log_level, text[:-1])
else:
log.log(log_level, text)
def load_json(text, root_types=(dict,)):
decoded = json.loads(text)
if not isinstance(decoded, tuple(root_types)):
expected_types = ", ".join([str(t) for t in root_types])
raise TypeError("(%s) root types expected, got %s instead"
% (expected_types, type(decoded)))
return decoded
def is_ipv4(instr):
    """determine if input string is an ipv4 address. return boolean."""
toks = instr.split('.')
if len(toks) != 4:
return False
try:
toks = [x for x in toks if int(x) < 256 and int(x) >= 0]
except:
return False
return len(toks) == 4
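# For instance (illustrative values): is_ipv4("10.0.0.1") returns True, while
# is_ipv4("256.1.1.1") and is_ipv4("10.0.0") both return False.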
def get_cfg_option_bool(yobj, key, default=False):
if key not in yobj:
return default
return translate_bool(yobj[key])
def get_cfg_option_str(yobj, key, default=None):
if key not in yobj:
return default
val = yobj[key]
if not isinstance(val, (str, basestring)):
val = str(val)
return val
def system_info():
return {
'platform': platform.platform(),
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
'dist': platform.linux_distribution(),
}
def get_cfg_option_list(yobj, key, default=None):
"""
Gets the C{key} config option from C{yobj} as a list of strings. If the
key is present as a single string it will be returned as a list with one
string arg.
@param yobj: The configuration object.
@param key: The configuration key to get.
@param default: The default to return if key is not found.
@return: The configuration option as a list of strings or default if key
is not found.
"""
    if key not in yobj:
return default
if yobj[key] is None:
return []
val = yobj[key]
if isinstance(val, (list)):
cval = [v for v in val]
return cval
if not isinstance(val, (basestring)):
val = str(val)
return [val]
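# Illustrative examples of get_cfg_option_list (not from the original source):
# get_cfg_option_list({'groups': 'adm'}, 'groups') == ['adm']
# get_cfg_option_list({'groups': ['adm', 'wheel']}, 'groups') == ['adm', 'wheel']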
# get a cfg entry by its path array
# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
def get_cfg_by_path(yobj, keyp, default=None):
cur = yobj
for tok in keyp:
if tok not in cur:
return default
cur = cur[tok]
return cur
def fixup_output(cfg, mode):
(outfmt, errfmt) = get_output_cfg(cfg, mode)
redirect_output(outfmt, errfmt)
return (outfmt, errfmt)
# redirect_output(outfmt, errfmt, orig_out, orig_err)
# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
# fmt can be:
# > FILEPATH
# >> FILEPATH
# | program [ arg1 [ arg2 [ ... ] ] ]
#
# with a '|', arguments are passed to shell, so one level of
# shell escape is required.
#
# if _CLOUD_INIT_SAVE_STDOUT is set in environment to a non empty and true
# value then output will not be redirected (useful for debugging).
#
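#
# For example (illustrative values only): an outfmt of
#   "> /var/log/cloud-init-output.log" truncates and writes to that file,
#   ">> /var/log/cloud-init-output.log" appends to it, and
#   "| tee -a /var/log/cloud-init-output.log" pipes output through a shell command.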
def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")):
LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT")
return
if not o_out:
o_out = sys.stdout
if not o_err:
o_err = sys.stderr
if outfmt:
LOG.debug("Redirecting %s to %s", o_out, outfmt)
(mode, arg) = outfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
if mode == ">":
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
new_fp = proc.stdin # pylint: disable=E1101
else:
raise TypeError("Invalid type for output format: %s" % outfmt)
if o_out:
os.dup2(new_fp.fileno(), o_out.fileno())
if errfmt == outfmt:
LOG.debug("Redirecting %s to %s", o_err, outfmt)
os.dup2(new_fp.fileno(), o_err.fileno())
return
if errfmt:
LOG.debug("Redirecting %s to %s", o_err, errfmt)
(mode, arg) = errfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
if mode == ">":
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
new_fp = proc.stdin # pylint: disable=E1101
else:
raise TypeError("Invalid type for error format: %s" % errfmt)
if o_err:
os.dup2(new_fp.fileno(), o_err.fileno())
def make_url(scheme, host, port=None,
path='', params='', query='', fragment=''):
pieces = []
pieces.append(scheme or '')
netloc = ''
if host:
netloc = str(host)
if port is not None:
netloc += ":" + "%s" % (port)
pieces.append(netloc or '')
pieces.append(path or '')
pieces.append(params or '')
pieces.append(query or '')
pieces.append(fragment or '')
return urlparse.urlunparse(pieces)
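# Illustrative example (not from the original source):
# make_url("http", "example.com", 8080, "/meta-data") == "http://example.com:8080/meta-data"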
def mergemanydict(srcs, reverse=False):
if reverse:
srcs = reversed(srcs)
merged_cfg = {}
for cfg in srcs:
if cfg:
# Figure out which mergers to apply...
mergers_to_apply = mergers.dict_extract_mergers(cfg)
if not mergers_to_apply:
mergers_to_apply = mergers.default_mergers()
merger = mergers.construct(mergers_to_apply)
merged_cfg = merger.merge(merged_cfg, cfg)
return merged_cfg
@contextlib.contextmanager
def chdir(ndir):
curr = os.getcwd()
try:
os.chdir(ndir)
yield ndir
finally:
os.chdir(curr)
@contextlib.contextmanager
def umask(n_msk):
old = os.umask(n_msk)
try:
yield old
finally:
os.umask(old)
@contextlib.contextmanager
def tempdir(**kwargs):
# This seems like it was only added in python 3.2
# Make it since its useful...
# See: http://bugs.python.org/file12970/tempdir.patch
tdir = tempfile.mkdtemp(**kwargs)
try:
yield tdir
finally:
del_dir(tdir)
def center(text, fill, max_len):
return '{0:{fill}{align}{size}}'.format(text, fill=fill,
align="^", size=max_len)
def del_dir(path):
LOG.debug("Recursively deleting %s", path)
shutil.rmtree(path)
def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if skip_no_exist and not os.path.isdir(dirp):
return
failed = []
attempted = []
if exe_prefix is None:
prefix = []
elif isinstance(exe_prefix, str):
prefix = [str(exe_prefix)]
elif isinstance(exe_prefix, list):
prefix = exe_prefix
else:
raise TypeError("exe_prefix must be None, str, or list")
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
attempted.append(exe_path)
try:
subp(prefix + [exe_path], capture=False)
except ProcessExecutionError as e:
logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
failed.append(e)
if failed and attempted:
raise RuntimeError('Runparts: %s failures in %s attempted commands'
% (len(failed), len(attempted)))
# read_optional_seed
# returns boolean indicating success or failure (presence of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
# 'meta-data' entries
def read_optional_seed(fill, base="", ext="", timeout=5):
try:
(md, ud) = read_seeded(base, ext, timeout)
fill['user-data'] = ud
fill['meta-data'] = md
return True
except url_helper.UrlError as e:
if e.code == url_helper.NOT_FOUND:
return False
raise
def fetch_ssl_details(paths=None):
ssl_details = {}
# Lookup in these locations for ssl key/cert files
ssl_cert_paths = [
'/opt/freeware/var/lib/cloud/data/ssl',
'/opt/freeware/var/lib/cloud/instance/data/ssl',
]
if paths:
ssl_cert_paths.extend([
os.path.join(paths.get_ipath_cur('data'), 'ssl'),
os.path.join(paths.get_cpath('data'), 'ssl'),
])
ssl_cert_paths = uniq_merge(ssl_cert_paths)
ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
cert_file = None
for d in ssl_cert_paths:
if os.path.isfile(os.path.join(d, 'cert.pem')):
cert_file = os.path.join(d, 'cert.pem')
break
key_file = None
for d in ssl_cert_paths:
if os.path.isfile(os.path.join(d, 'key.pem')):
key_file = os.path.join(d, 'key.pem')
break
if cert_file and key_file:
ssl_details['cert_file'] = cert_file
ssl_details['key_file'] = key_file
elif cert_file:
ssl_details['cert_file'] = cert_file
return ssl_details
def read_file_or_url(url, timeout=5, retries=10,
headers=None, data=None, sec_between=1, ssl_details=None,
headers_cb=None, exception_cb=None):
url = url.lstrip()
if url.startswith("/"):
url = "file://%s" % url
if url.lower().startswith("file://"):
if data:
LOG.warn("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
try:
contents = load_file(file_path)
except IOError as e:
code = e.errno
if e.errno == errno.ENOENT:
code = url_helper.NOT_FOUND
raise url_helper.UrlError(cause=e, code=code, headers=None)
return url_helper.FileResponse(file_path, contents=contents)
else:
return url_helper.readurl(url,
timeout=timeout,
retries=retries,
headers=headers,
headers_cb=headers_cb,
data=data,
sec_between=sec_between,
ssl_details=ssl_details,
exception_cb=exception_cb)
def load_yaml(blob, default=None, allowed=(dict,)):
loaded = default
try:
blob = str(blob)
LOG.debug(("Attempting to load yaml from string "
"of length %s with allowed root types %s"),
len(blob), allowed)
converted = safeyaml.load(blob)
if not isinstance(converted, allowed):
# Yes this will just be caught, but thats ok for now...
raise TypeError(("Yaml load allows %s root types,"
" but got %s instead") %
(allowed, type_utils.obj_name(converted)))
loaded = converted
except (yaml.YAMLError, TypeError, ValueError):
if len(blob) == 0:
LOG.debug("load_yaml given empty string, returning default")
else:
logexc(LOG, "Failed loading yaml blob")
return loaded
def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
if base.startswith("/"):
base = "file://%s" % base
# default retries for file is 0. for network is 10
if base.startswith("file://"):
retries = file_retries
if base.find("%s") >= 0:
ud_url = base % ("user-data" + ext)
md_url = base % ("meta-data" + ext)
else:
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
    md_resp = read_file_or_url(md_url, timeout, retries)
md = None
if md_resp.ok():
md_str = str(md_resp)
md = load_yaml(md_str, default={})
    ud_resp = read_file_or_url(ud_url, timeout, retries)
ud = None
if ud_resp.ok():
ud_str = str(ud_resp)
ud = ud_str
return (md, ud)
def read_conf_d(confd):
# Get reverse sorted list (later trumps newer)
confs = sorted(os.listdir(confd), reverse=True)
# Remove anything not ending in '.cfg'
confs = [f for f in confs if f.endswith(".cfg")]
# Remove anything not a file
confs = [f for f in confs
if os.path.isfile(os.path.join(confd, f))]
# Load them all so that they can be merged
cfgs = []
for fn in confs:
cfgs.append(read_conf(os.path.join(confd, fn)))
return mergemanydict(cfgs)
def read_conf_with_confd(cfgfile):
cfg = read_conf(cfgfile)
confd = False
if "conf_d" in cfg:
confd = cfg['conf_d']
if confd:
if not isinstance(confd, (str, basestring)):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
(cfgfile, type_utils.obj_name(confd)))
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
confd = "%s.d" % cfgfile
if not confd or not os.path.isdir(confd):
return cfg
# Conf.d settings override input configuration
confd_cfg = read_conf_d(confd)
return mergemanydict([confd_cfg, cfg])
def read_cc_from_cmdline(cmdline=None):
# this should support reading cloud-config information from
# the kernel command line. It is intended to support content of the
# format:
# cc: <yaml content here> [end_cc]
# this would include:
# cc: ssh_import_id: [smoser, kirkland]\\n
# cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
# cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
if cmdline is None:
cmdline = get_cmdline()
tag_begin = "cc:"
tag_end = "end_cc"
begin_l = len(tag_begin)
end_l = len(tag_end)
clen = len(cmdline)
tokens = []
begin = cmdline.find(tag_begin)
while begin >= 0:
end = cmdline.find(tag_end, begin + begin_l)
if end < 0:
end = clen
tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
"\n"))
begin = cmdline.find(tag_begin, end + end_l)
return '\n'.join(tokens)
def dos2unix(contents):
# find first end of line
pos = contents.find('\n')
if pos <= 0 or contents[pos - 1] != '\r':
return contents
return contents.replace('\r\n', '\n')
def get_hostname_fqdn(cfg, cloud):
# return the hostname and fqdn from 'cfg'. If not found in cfg,
# then fall back to data from cloud
if "fqdn" in cfg:
# user specified a fqdn. Default hostname then is based off that
fqdn = cfg['fqdn']
hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
else:
if "hostname" in cfg and cfg['hostname'].find('.') > 0:
# user specified hostname, and it had '.' in it
# be nice to them. set fqdn and hostname from that
fqdn = cfg['hostname']
hostname = cfg['hostname'][:fqdn.find('.')]
else:
# no fqdn set, get fqdn from cloud.
# get hostname from cfg if available otherwise cloud
fqdn = cloud.get_hostname(fqdn=True)
if "hostname" in cfg:
hostname = cfg['hostname']
else:
hostname = cloud.get_hostname()
return (hostname, fqdn)
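# Illustrative example of get_hostname_fqdn (not from the original source): with
# cfg = {'fqdn': 'web1.example.com'} and no 'hostname' key, this returns
# ('web1', 'web1.example.com'); without 'fqdn' or a dotted 'hostname' in cfg,
# both values fall back to what the cloud/datasource reports.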
def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
"""
For each host a single line should be present with
the following information:
IP_address canonical_hostname [aliases...]
Fields of the entry are separated by any number of blanks and/or tab
characters. Text from a "#" character until the end of the line is a
comment, and is ignored. Host names may contain only alphanumeric
characters, minus signs ("-"), and periods ("."). They must begin with
an alphabetic character and end with an alphanumeric character.
Optional aliases provide for name changes, alternate spellings, shorter
hostnames, or generic hostnames (for example, localhost).
"""
fqdn = None
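    # Illustrative example (not from the original file): given a hosts line of
    #   "192.168.1.10 foo.mydomain.org foo"
    # get_fqdn_from_hosts("foo") would return "foo.mydomain.org".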
try:
for line in load_file(filename).splitlines():
hashpos = line.find("#")
if hashpos >= 0:
line = line[0:hashpos]
line = line.strip()
if not line:
continue
            # If there are fewer than 3 entries
# (IP_address, canonical_hostname, alias)
# then ignore this line
toks = line.split()
if len(toks) < 3:
continue
if hostname in toks[2:]:
fqdn = toks[1]
break
except IOError:
pass
return fqdn
def get_cmdline_url(names=('cloud-config-url', 'url'),
starts="#cloud-config", cmdline=None):
if cmdline is None:
cmdline = get_cmdline()
data = keyval_str_to_dict(cmdline)
url = None
key = None
for key in names:
if key in data:
url = data[key]
break
if not url:
return (None, None, None)
resp = read_file_or_url(url)
if resp.contents.startswith(starts) and resp.ok():
return (key, url, str(resp))
return (key, url, None)
def is_resolvable(name):
"""determine if a url is resolvable, return a boolean
    This also attempts to be resilient against dns redirection.
Note, that normal nsswitch resolution is used here. So in order
to avoid any utilization of 'search' entries in /etc/resolv.conf
we have to append '.'.
The top level 'invalid' domain is invalid per RFC. And example.com
should also not exist. The random entry will be resolved inside
the search list.
"""
global _DNS_REDIRECT_IP # pylint: disable=W0603
if _DNS_REDIRECT_IP is None:
badips = set()
badnames = ("does-not-exist.example.com.", "example.invalid.",
rand_str())
badresults = {}
for iname in badnames:
try:
result = socket.getaddrinfo(iname, None, 0, 0,
socket.SOCK_STREAM, socket.AI_CANONNAME)
badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
badips.add(sockaddr[0])
except (socket.gaierror, socket.error):
pass
_DNS_REDIRECT_IP = badips
if badresults:
LOG.debug("detected dns redirection: %s", badresults)
try:
result = socket.getaddrinfo(name, None)
# check first result's sockaddr field
addr = result[0][4][0]
if addr in _DNS_REDIRECT_IP:
return False
return True
except (socket.gaierror, socket.error):
return False
def get_hostname():
hostname = socket.gethostname()
return hostname
def gethostbyaddr(ip):
try:
return socket.gethostbyaddr(ip)[0]
except socket.herror:
return None
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return is_resolvable(urlparse.urlparse(url).hostname)
def search_for_mirror(candidates):
"""
Search through a list of mirror urls for one that works
This needs to return quickly.
"""
for cand in candidates:
try:
if is_resolvable_url(cand):
return cand
except Exception:
pass
return None
def close_stdin():
"""
reopen stdin as /dev/null so even subprocesses or other os level things get
/dev/null as input.
if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
value then input will not be closed (useful for debugging).
"""
if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")):
return
with open(os.devnull) as fp:
os.dup2(fp.fileno(), sys.stdin.fileno())
def find_devs_with(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
"""
find devices matching given criteria (via blkid)
criteria can be *one* of:
TYPE=<filesystem>
LABEL=<label>
UUID=<uuid>
"""
blk_id_cmd = ['blkid']
options = []
if criteria:
# Search for block devices with tokens named NAME that
# have the value 'value' and display any devices which are found.
# Common values for NAME include TYPE, LABEL, and UUID.
# If there are no devices specified on the command line,
# all block devices will be searched; otherwise,
# only search the devices specified by the user.
options.append("-t%s" % (criteria))
if tag:
# For each (specified) device, show only the tags that match tag.
options.append("-s%s" % (tag))
if no_cache:
# If you want to start with a clean cache
# (i.e. don't report devices previously scanned
# but not necessarily available at this time), specify /dev/null.
options.extend(["-c", "/dev/null"])
if oformat:
# Display blkid's output using the specified format.
# The format parameter may be:
# full, value, list, device, udev, export
options.append('-o%s' % (oformat))
if path:
options.append(path)
cmd = blk_id_cmd + options
# See man blkid for why 2 is added
(out, _err) = subp(cmd, rcs=[0, 2])
entries = []
for line in out.splitlines():
line = line.strip()
if line:
entries.append(line)
return entries
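# Illustrative call (assumed): find_devs_with("LABEL=cloudimg-rootfs") runs
# roughly `blkid -tLABEL=cloudimg-rootfs -odevice` and returns a list of
# matching device paths such as ['/dev/vda1'].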
def peek_file(fname, max_bytes):
LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
with open(fname, 'rb') as ifh:
return ifh.read(max_bytes)
def uniq_list(in_list):
out_list = []
for i in in_list:
if i in out_list:
continue
else:
out_list.append(i)
return out_list
def load_file(fname, read_cb=None, quiet=False):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
ofh = StringIO()
try:
with open(fname, 'rb') as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
except IOError as e:
if not quiet:
raise
if e.errno != errno.ENOENT:
raise
contents = ofh.getvalue()
LOG.debug("Read %s bytes from %s", len(contents), fname)
return contents
def get_cmdline():
if 'DEBUG_PROC_CMDLINE' in os.environ:
cmdline = os.environ["DEBUG_PROC_CMDLINE"]
else:
try:
cmdline = load_file("/proc/cmdline").strip()
except:
cmdline = ""
return cmdline
def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
bytes_piped = 0
while True:
data = in_fh.read(chunk_size)
if data == '':
break
else:
out_fh.write(data)
bytes_piped += len(data)
if chunk_cb:
chunk_cb(bytes_piped)
out_fh.flush()
return bytes_piped
def chownbyid(fname, uid=None, gid=None):
if uid in [None, -1] and gid in [None, -1]:
# Nothing to do
return
LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid)
os.chown(fname, uid, gid)
def chownbyname(fname, user=None, group=None):
uid = -1
gid = -1
try:
if user:
uid = pwd.getpwnam(user).pw_uid
if group:
gid = grp.getgrnam(group).gr_gid
except KeyError as e:
raise OSError("Unknown user or group: %s" % (e))
chownbyid(fname, uid, gid)
# Always returns well formatted values
# cfg is expected to have an entry 'output' in it, which is a dictionary
# that includes entries for 'init', 'config', 'final' or 'all'
# init: /var/log/cloud.out
# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
# final:
# output: "| logger -p"
# error: "> /dev/null"
# this returns the specific 'mode' entry, cleanly formatted, with value
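# Illustrative results for the example above (assumed values):
#   get_output_cfg(cfg, 'init') -> [">> /var/log/cloud.out", ">> /var/log/cloud.out"]
#   get_output_cfg(cfg, 'final') -> ["| logger -p", "> /dev/null"]
# Note that a bare path picks up the default ">>" (append) redirection.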
def get_output_cfg(cfg, mode):
ret = [None, None]
if not cfg or not 'output' in cfg:
return ret
outcfg = cfg['output']
if mode in outcfg:
modecfg = outcfg[mode]
else:
if 'all' not in outcfg:
return ret
        # if there is an 'all' item in the output list
# then it applies to all users of this (init, config, final)
modecfg = outcfg['all']
# if value is a string, it specifies stdout and stderr
if isinstance(modecfg, str):
ret = [modecfg, modecfg]
# if its a list, then we expect (stdout, stderr)
if isinstance(modecfg, list):
if len(modecfg) > 0:
ret[0] = modecfg[0]
if len(modecfg) > 1:
ret[1] = modecfg[1]
# if it is a dictionary, expect 'out' and 'error'
# items, which indicate out and error
if isinstance(modecfg, dict):
if 'output' in modecfg:
ret[0] = modecfg['output']
if 'error' in modecfg:
ret[1] = modecfg['error']
# if err's entry == "&1", then make it same as stdout
# as in shell syntax of "echo foo >/dev/null 2>&1"
if ret[1] == "&1":
ret[1] = ret[0]
swlist = [">>", ">", "|"]
for i in range(len(ret)):
if not ret[i]:
continue
val = ret[i].lstrip()
found = False
for s in swlist:
if val.startswith(s):
val = "%s %s" % (s, val[len(s):].strip())
found = True
break
if not found:
# default behavior is append
val = "%s %s" % (">>", val.strip())
ret[i] = val
return ret
def logexc(log, msg, *args):
    # Logging at warn here (rather than error) allows the level to be
    # changed easily; it is not always an error, nor is it always desirable
    # to have that much junk going to a non-debug stream
if msg:
log.warn(msg, *args)
# Debug gets the full trace
log.debug(msg, exc_info=1, *args)
def hash_blob(blob, routine, mlen=None):
hasher = hashlib.new(routine)
hasher.update(blob)
digest = hasher.hexdigest()
    # Don't get too long now
if mlen is not None:
return digest[0:mlen]
else:
return digest
def is_user(name):
try:
if pwd.getpwnam(name):
return True
except KeyError:
return False
def is_group(name):
try:
if grp.getgrnam(name):
return True
except KeyError:
return False
def rename(src, dest):
LOG.debug("Renaming %s to %s", src, dest)
# TODO(harlowja) use a se guard here??
os.rename(src, dest)
def ensure_dirs(dirlist, mode=0755):
for d in dirlist:
ensure_dir(d, mode)
def read_write_cmdline_url(target_fn):
if not os.path.exists(target_fn):
try:
(key, url, content) = get_cmdline_url()
except:
logexc(LOG, "Failed fetching command line url")
return
try:
if key and content:
write_file(target_fn, content, mode=0600)
LOG.debug(("Wrote to %s with contents of command line"
" url %s (len=%s)"), target_fn, url, len(content))
elif key and not content:
LOG.debug(("Command line key %s with url"
" %s had no contents"), key, url)
except:
logexc(LOG, "Failed writing url content to %s", target_fn)
def yaml_dumps(obj):
formatted = yaml.dump(obj,
line_break="\n",
indent=4,
explicit_start=True,
explicit_end=True,
default_flow_style=False)
return formatted
def ensure_dir(path, mode=None):
if not os.path.isdir(path):
# Make the dir and adjust the mode
with SeLinuxGuard(os.path.dirname(path), recursive=True):
os.makedirs(path)
chmod(path, mode)
else:
# Just adjust the mode
chmod(path, mode)
@contextlib.contextmanager
def unmounter(umount):
try:
yield umount
finally:
if umount:
umount_cmd = ["umount", '-l', umount]
subp(umount_cmd)
def mounts():
mounted = {}
try:
# Go through mounts to see what is already mounted
if os.path.exists("/proc/mounts"):
mount_locs = load_file("/proc/mounts").splitlines()
method = 'proc'
else:
(mountoutput, _err) = subp("mount")
mount_locs = mountoutput.splitlines()
method = 'mount'
mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
for mpline in mount_locs:
# Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
# FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
try:
if method == 'proc':
(dev, mp, fstype, opts, _freq, _passno) = mpline.split()
else:
m = re.search(mountre, mpline)
dev = m.group(1)
mp = m.group(2)
fstype = m.group(3)
opts = m.group(4)
except:
continue
# If the name of the mount point contains spaces these
# can be escaped as '\040', so undo that..
mp = mp.replace("\\040", " ")
mounted[dev] = {
'fstype': fstype,
'mountpoint': mp,
'opts': opts,
}
LOG.debug("Fetched %s mounts from %s", mounted, method)
except (IOError, OSError):
logexc(LOG, "Failed fetching mount points")
return mounted
def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
"""
Mount the device, call method 'callback' passing the directory
in which it was mounted, then unmount. Return whatever 'callback'
returned. If data != None, also pass data to callback.
"""
mounted = mounts()
with tempdir() as tmpd:
umount = False
if device in mounted:
mountpoint = mounted[device]['mountpoint']
else:
try:
mountcmd = ['mount']
mountopts = []
if rw:
mountopts.append('rw')
else:
mountopts.append('ro')
if sync:
                    # This seems like the safe approach to take
# (ie where this is on by default)
mountopts.append("sync")
if mountopts:
mountcmd.extend(["-o", ",".join(mountopts)])
if mtype:
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
subp(mountcmd)
umount = tmpd # This forces it to be unmounted (when set)
mountpoint = tmpd
except (IOError, OSError) as exc:
raise MountFailedError(("Failed mounting %s "
"to %s due to: %s") %
(device, tmpd, exc))
# Be nice and ensure it ends with a slash
if not mountpoint.endswith("/"):
mountpoint += "/"
with unmounter(umount):
if data is None:
ret = callback(mountpoint)
else:
ret = callback(mountpoint, data)
return ret
def get_builtin_cfg():
# Deep copy so that others can't modify
return obj_copy.deepcopy(CFG_BUILTIN)
def sym_link(source, link, force=False):
LOG.debug("Creating symbolic link from %r => %r", link, source)
if force and os.path.exists(link):
del_file(link)
os.symlink(source, link)
def del_file(path):
LOG.debug("Attempting to remove %s", path)
try:
os.unlink(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise e
def copy(src, dest):
LOG.debug("Copying %s to %s", src, dest)
shutil.copy(src, dest)
def time_rfc2822():
try:
ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
except:
ts = "??"
return ts
def uptime():
uptime_str = '??'
method = 'unknown'
try:
if os.path.exists("/proc/uptime"):
method = '/proc/uptime'
contents = load_file("/proc/uptime").strip()
if contents:
uptime_str = contents.split()[0]
elif os.path.exists("/usr/sbin/acct/fwtmp"): # for AIX support
method = '/usr/sbin/acct/fwtmp'
import commands
contents = commands.getoutput('/usr/sbin/acct/fwtmp < /var/adm/wtmp | /usr/bin/grep "system boot" 2>/dev/null')
if contents:
bootup = contents.splitlines()[-1].split()[6]
now = time.time()
uptime_str = now - float(bootup)
else:
method = 'ctypes'
libc = ctypes.CDLL('/lib/libc.so.7')
size = ctypes.c_size_t()
buf = ctypes.c_int()
size.value = ctypes.sizeof(buf)
libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
ctypes.byref(size), None, 0)
now = time.time()
bootup = buf.value
uptime_str = now - bootup
except:
logexc(LOG, "Unable to read uptime using method: %s" % method)
return uptime_str
def append_file(path, content):
write_file(path, content, omode="ab", mode=None)
def ensure_file(path, mode=0644):
write_file(path, content='', omode="ab", mode=mode)
def safe_int(possible_int):
try:
return int(possible_int)
except (ValueError, TypeError):
return None
def chmod(path, mode):
real_mode = safe_int(mode)
if path and real_mode:
with SeLinuxGuard(path):
os.chmod(path, real_mode)
def write_file(filename, content, mode=0644, omode="wb"):
"""
Writes a file with the given content and sets the file mode as specified.
    Restores the SELinux context if possible.
@param filename: The full path of the file to write.
@param content: The content to write to the file.
@param mode: The filesystem mode to set on the file.
@param omode: The open mode used when opening the file (r, rb, a, etc.)
"""
ensure_dir(os.path.dirname(filename))
LOG.debug("Writing to %s - %s: [%s] %s bytes",
filename, omode, mode, len(content))
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
fh.flush()
chmod(filename, mode)
def delete_dir_contents(dirname):
"""
Deletes all contents of a directory without deleting the directory itself.
@param dirname: The directory whose contents should be deleted.
"""
for node in os.listdir(dirname):
node_fullpath = os.path.join(dirname, node)
if os.path.isdir(node_fullpath):
del_dir(node_fullpath)
else:
del_file(node_fullpath)
def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
logstring=False):
if rcs is None:
rcs = [0]
try:
if not logstring:
LOG.debug(("Running command %s with allowed return codes %s"
" (shell=%s, capture=%s)"), args, rcs, shell, capture)
else:
LOG.debug(("Running hidden command to protect sensitive "
"input/output logstring: %s"), logstring)
if not capture:
stdout = None
stderr = None
else:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
stdin = subprocess.PIPE
sp = subprocess.Popen(args, stdout=stdout,
stderr=stderr, stdin=stdin,
env=env, shell=shell)
(out, err) = sp.communicate(data)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e)
rc = sp.returncode # pylint: disable=E1101
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
exit_code=rc,
cmd=args)
# Just ensure blank instead of none?? (iff capturing)
if not out and capture:
out = ''
if not err and capture:
err = ''
return (out, err)
def make_header(comment_char="#", base='created'):
ci_ver = version.version_string()
header = str(comment_char)
header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
header += " on %s" % time_rfc2822()
return header
def abs_join(*paths):
return os.path.abspath(os.path.join(*paths))
# shellify, takes a list of commands
# for each entry in the list
# if it is an array, shell protect it (with single ticks)
# if it is a string, do nothing
def shellify(cmdlist, add_header=True):
content = ''
if add_header:
content += "#!/bin/sh\n"
escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
cmds_made = 0
for args in cmdlist:
# If the item is a list, wrap all items in single tick.
# If its not, then just write it directly.
if isinstance(args, list):
fixed = []
for f in args:
fixed.append("'%s'" % (str(f).replace("'", escaped)))
content = "%s%s\n" % (content, ' '.join(fixed))
cmds_made += 1
elif isinstance(args, (str, basestring)):
content = "%s%s\n" % (content, args)
cmds_made += 1
else:
raise RuntimeError(("Unable to shellify type %s"
" which is not a list or string")
% (type_utils.obj_name(args)))
LOG.debug("Shellified %s commands.", cmds_made)
return content
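# Illustrative example (assumed): shellify([["echo", "hi mom"], "ls -l"])
# would produce:
#   #!/bin/sh
#   'echo' 'hi mom'
#   ls -l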
def strip_prefix_suffix(line, prefix=None, suffix=None):
if prefix and line.startswith(prefix):
line = line[len(prefix):]
if suffix and line.endswith(suffix):
line = line[:-len(suffix)]
return line
def is_container():
"""
Checks to see if this code running in a container of some sort
"""
for helper in CONTAINER_TESTS:
try:
# try to run a helper program. if it returns true/zero
# then we're inside a container. otherwise, no
subp([helper])
return True
except (IOError, OSError):
pass
# this code is largely from the logic in
# ubuntu's /etc/init/container-detect.conf
try:
# Detect old-style libvirt
# Detect OpenVZ containers
pid1env = get_proc_env(1)
if "container" in pid1env:
return True
if "LIBVIRT_LXC_UUID" in pid1env:
return True
except (IOError, OSError):
pass
# Detect OpenVZ containers
if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
return True
try:
# Detect Vserver containers
lines = load_file("/proc/self/status").splitlines()
for line in lines:
if line.startswith("VxID:"):
(_key, val) = line.strip().split(":", 1)
if val != "0":
return True
except (IOError, OSError):
pass
return False
def get_proc_env(pid):
"""
Return the environment in a dict that a given process id was started with.
"""
env = {}
fn = os.path.join("/proc/", str(pid), "environ")
try:
contents = load_file(fn)
toks = contents.split("\x00")
for tok in toks:
if tok == "":
continue
(name, val) = tok.split("=", 1)
if name:
env[name] = val
except (IOError, OSError):
pass
return env
def keyval_str_to_dict(kvstring):
ret = {}
for tok in kvstring.split():
try:
(key, val) = tok.split("=", 1)
except ValueError:
key = tok
val = True
ret[key] = val
return ret
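# Illustrative example (assumed): keyval_str_to_dict("ro root=/dev/sda1 quiet")
# returns {'ro': True, 'root': '/dev/sda1', 'quiet': True}; tokens without an
# '=' simply map to True.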
def is_partition(device):
if device.startswith("/dev/"):
device = device[5:]
return os.path.isfile("/sys/class/block/%s/partition" % device)
def expand_package_list(version_fmt, pkgs):
# we will accept tuples, lists of tuples, or just plain lists
if not isinstance(pkgs, list):
pkgs = [pkgs]
pkglist = []
for pkg in pkgs:
if isinstance(pkg, basestring):
pkglist.append(pkg)
continue
if isinstance(pkg, (tuple, list)):
if len(pkg) < 1 or len(pkg) > 2:
raise RuntimeError("Invalid package & version tuple.")
if len(pkg) == 2 and pkg[1]:
pkglist.append(version_fmt % tuple(pkg))
continue
pkglist.append(pkg[0])
else:
raise RuntimeError("Invalid package type.")
return pkglist
def parse_mount_info(path, mountinfo_lines, log=LOG):
"""Return the mount information for PATH given the lines from
/proc/$$/mountinfo."""
path_elements = [e for e in path.split('/') if e]
devpth = None
fs_type = None
match_mount_point = None
match_mount_point_elements = None
for i, line in enumerate(mountinfo_lines):
parts = line.split()
# Completely fail if there is anything in any line that is
# unexpected, as continuing to parse past a bad line could
# cause an incorrect result to be returned, so it's better
# return nothing than an incorrect result.
# The minimum number of elements in a valid line is 10.
if len(parts) < 10:
            log.debug("Line %d has too few columns (%d): %s",
i + 1, len(parts), line)
return None
mount_point = parts[4]
mount_point_elements = [e for e in mount_point.split('/') if e]
# Ignore mounts deeper than the path in question.
if len(mount_point_elements) > len(path_elements):
continue
# Ignore mounts where the common path is not the same.
l = min(len(mount_point_elements), len(path_elements))
if mount_point_elements[0:l] != path_elements[0:l]:
continue
# Ignore mount points higher than an already seen mount
# point.
if (match_mount_point_elements is not None and
len(match_mount_point_elements) > len(mount_point_elements)):
continue
# Find the '-' which terminates a list of optional columns to
# find the filesystem type and the path to the device. See
# man 5 proc for the format of this file.
try:
i = parts.index('-')
except ValueError:
log.debug("Did not find column named '-' in line %d: %s",
i + 1, line)
return None
# Get the path to the device.
try:
fs_type = parts[i + 1]
devpth = parts[i + 2]
except IndexError:
log.debug("Too few columns after '-' column in line %d: %s",
i + 1, line)
return None
match_mount_point = mount_point
match_mount_point_elements = mount_point_elements
if devpth and fs_type and match_mount_point:
return (devpth, fs_type, match_mount_point)
else:
return None
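# Illustrative mountinfo line (format per `man 5 proc`; values assumed):
#   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
# Here parts[4] is the mount point (/mnt2) and the fields after the '-'
# separator yield the filesystem type (ext3) and the device path (/dev/root).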
def parse_mtab(path):
"""On older kernels there's no /proc/$$/mountinfo, so use mtab."""
for line in load_file("/etc/mtab").splitlines():
devpth, mount_point, fs_type = line.split()[:3]
if mount_point == path:
return devpth, fs_type, mount_point
return None
def parse_mount(path):
(mountoutput, _err) = subp("mount")
mount_locs = mountoutput.splitlines()
for line in mount_locs:
m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
devpth = m.group(1)
mount_point = m.group(2)
fs_type = m.group(3)
if mount_point == path:
return devpth, fs_type, mount_point
return None
def get_mount_info(path, log=LOG):
# Use /proc/$$/mountinfo to find the device where path is mounted.
    # This is done because, with a btrfs filesystem, os.stat(path)
    # does not return the ID of the underlying device.
#
# Here, / has a device of 18 (decimal).
#
# $ stat /
# File: '/'
# Size: 234 Blocks: 0 IO Block: 4096 directory
# Device: 12h/18d Inode: 256 Links: 1
# Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
# Access: 2013-01-13 07:31:04.358011255 +0000
# Modify: 2013-01-13 18:48:25.930011255 +0000
# Change: 2013-01-13 18:48:25.930011255 +0000
# Birth: -
#
# Find where / is mounted:
#
# $ mount | grep ' / '
# /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
#
# And the device ID for /dev/vda1 is not 18:
#
# $ ls -l /dev/vda1
# brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
#
# So use /proc/$$/mountinfo to find the device underlying the
# input path.
mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log)
elif os.path.exists("/etc/mtab"):
return parse_mtab(path)
else:
return parse_mount(path)
def which(program):
# Return path of program for execution if found in path
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
_fpath, _ = os.path.split(program)
if _fpath:
if is_exe(program):
return program
else:
for path in os.environ.get("PATH", "").split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
if args is None:
args = []
if kwargs is None:
kwargs = {}
start = time.time()
ustart = None
if get_uptime:
try:
ustart = float(uptime())
except ValueError:
pass
try:
ret = func(*args, **kwargs)
finally:
delta = time.time() - start
udelta = None
if ustart is not None:
try:
udelta = float(uptime()) - ustart
except ValueError:
pass
tmsg = " took %0.3f seconds" % delta
if get_uptime:
if isinstance(udelta, (float)):
tmsg += " (%0.2f)" % udelta
else:
tmsg += " (N/A)"
try:
logfunc(msg + tmsg)
except:
pass
return ret
def expand_dotted_devname(dotted):
toks = dotted.rsplit(".", 1)
if len(toks) > 1:
return toks
else:
return (dotted, None)
def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
# return a dictionary populated with keys in 'required' and 'optional'
    # by reading files in base + delim + entry
if required is None:
required = []
if optional is None:
optional = []
missing = []
ret = {}
for f in required + optional:
try:
ret[f] = load_file(base + delim + f, quiet=False)
except IOError as e:
if e.errno != errno.ENOENT:
raise
if f in required:
missing.append(f)
if len(missing):
        raise ValueError("Missing required files: %s" % ','.join(missing))
return ret
| gpl-3.0 | -5,898,552,899,357,104,000 | 29.063017 | 123 | 0.560221 | false |
google/ldif | ldif/util/math_util.py | 1 | 3602 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for mathematical operations."""
import itertools
import math
import numpy as np
import tensorflow as tf
def int_log2(i):
"""Computes the floor of the base 2 logarithm of an integer."""
log2 = 0
while i >= 2:
log2 += 1
i = i >> 1
return log2
def nonzero_mean(tensor):
"""The mean over nonzero values in a tensor."""
num = tf.reduce_sum(tensor)
denom = tf.cast(tf.count_nonzero(tensor), dtype=tf.float32)
denom = tf.where(denom == 0.0, 1e-8, denom)
return tf.divide(num, denom)
def increase_frequency(t, out_dim, flatten=False, interleave=True):
"""Maps elements of a tensor to a higher frequency, higher dimensional space.
As shown in NeRF (https://arxiv.org/pdf/2003.08934.pdf), this can help
networks learn higher frequency functions more easily since they are typically
biased to low frequency functions. By increasing the frequency of the input
signal, such biases are mitigated.
Args:
t: Tensor with any shape. Type tf.float32. The normalization of the input
dictates how many dimensions are needed to avoid periodicity. The NeRF
paper normalizes all inputs to the range [0, 1], which is safe.
out_dim: How many (sine, cosine) pairs to generate for each element of t.
Referred to as 'L' in NeRF. Integer.
flatten: Whether to flatten the output tensor to have the same rank as t.
Boolean. See returns section for details.
interleave: Whether to interleave the sin and cos results, as described in
the paper. If true, then the vector will contain [sin(2^0*t_i*pi),
cos(2^0*t_i*pi), sin(2^1*t_i*pi), ...]. If false, some operations will be
avoided, but the order will be [sin(2^0*t_i*pi), sin(2^1*t_i*pi), ...,
cos(2^0*t_i*pi), cos(2^1*t_i*pi), ...].
Returns:
Tensor of type tf.float32. Has shape [..., out_dim*2] if flatten is false.
If flatten is true, then if t has shape [..., N] then the output will have
shape [..., N*out_dim*2].
"""
# TODO(kgenova) Without a custom kernel this is somewhat less efficient,
# because the sin and cos results have to be next to one another in the output
# but tensorflow only allows computing them with two different ops. Thus it is
# necessary to do some expensive tf.concats. It probably won't be a bottleneck
# in most pipelines.
t = math.pi * t
scales = np.power(2, np.arange(out_dim, dtype=np.int32)).astype(np.float32)
t_rank = len(t.shape)
scale_shape = [1] * t_rank + [out_dim]
scales = tf.constant(np.reshape(scales, scale_shape), dtype=tf.float32)
scaled = tf.expand_dims(t, axis=-1) * scales
sin = tf.sin(scaled)
cos = tf.cos(scaled)
output = tf.concat([sin, cos], axis=-1)
if interleave:
sines = tf.unstack(sin, axis=-1)
cosines = tf.unstack(cos, axis=-1)
output = tf.stack(list(itertools.chain(*zip(sines, cosines))), axis=-1)
if flatten:
t_shape = t.get_shape().as_list()
output = tf.reshape(output, t_shape[:-1] + [t_shape[-1] * out_dim * 2])
return output
| apache-2.0 | -8,889,287,189,012,497,000 | 39.022222 | 80 | 0.691838 | false |
lundjordan/slaveapi | slaveapi/clients/bugzilla.py | 1 | 2329 | from ..global_state import bugzilla_client
import logging
import urllib
from requests import HTTPError
log = logging.getLogger(__name__)
class Bug(object):
def __init__(self, id_, loadInfo=True):
self.id_ = id_
self.data = {}
if loadInfo:
self.refresh()
def refresh(self):
try:
self.data = bugzilla_client.get_bug(self.id_)
self.id_ = self.data["id"]
except HTTPError, e:
log.debug('HTTPError - %s' % e)
def add_comment(self, comment, data={}):
return bugzilla_client.add_comment(self.id_, comment, data)
def update(self, data):
return bugzilla_client.update_bug(self.id_, data)
class ProblemTrackingBug(Bug):
product = "Release Engineering"
component = "Buildduty"
def __init__(self, slave_name, *args, **kwargs):
self.slave_name = slave_name
self.reboot_bug = None
Bug.__init__(self, slave_name, *args, **kwargs)
def create(self):
data = {
"product": self.product,
"component": self.component,
"summary": "%s problem tracking" % self.slave_name,
"version": "other",
"alias": self.slave_name,
# todo: do we care about setting these correctly?
"op_sys": "All",
"platform": "All"
}
resp = bugzilla_client.create_bug(data)
self.id_ = resp["id"]
reboot_product = "Infrastructure & Operations"
reboot_component = "DCOps"
reboot_summary = "%(slave)s is unreachable"
def get_reboot_bug(slave):
qs = "?product=%s&component=%s" % (urllib.quote(reboot_product), urllib.quote(reboot_component))
qs += "&blocks=%s&resolution=---" % slave.bug.id_
summary = reboot_summary % {"slave": slave.name}
for bug in bugzilla_client.request("GET", "bug" + qs)["bugs"]:
if bug["summary"] == summary:
return Bug(bug["id"])
else:
return None
def file_reboot_bug(slave):
data = {
"product": reboot_product,
"component": reboot_component,
"summary": reboot_summary % {"slave": slave.name},
"version": "other",
"op_sys": "All",
"platform": "All",
"blocks": slave.bug.id_,
}
resp = bugzilla_client.create_bug(data)
return Bug(resp["id"])
| mpl-2.0 | 151,448,738,641,104,300 | 29.246753 | 100 | 0.573637 | false |
antoine-de/navitia | source/jormungandr/jormungandr/modules/v1_routing/v1_routing.py | 1 | 10328 | # coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr.interfaces.v1 import Uri, Coverage, Journeys, GraphicalIsochrone, \
HeatMap, Schedules, Places, Ptobjects, Coord, Disruptions, Calendars, \
converters_collection_type, Status, GeoStatus, JSONSchema, LineReports
from werkzeug.routing import BaseConverter, FloatConverter, PathConverter
from jormungandr.modules_loader import AModule
from jormungandr import app
from jormungandr.modules.v1_routing.resources import Index
class RegionConverter(BaseConverter):
""" The region you want to query"""
type_ = str
regex = '[^(/;)]+'
def __init__(self, *args, **kwargs):
BaseConverter.__init__(self, *args, **kwargs)
class LonConverter(FloatConverter):
""" The longitude of where the coord you want to query"""
type_ = float
regex = '-?\\d+(\\.\\d+)?'
def __init__(self, *args, **kwargs):
FloatConverter.__init__(self, *args, **kwargs)
class LatConverter(FloatConverter):
""" The latitude of where the coord you want to query"""
type_ = float
regex = '-?\\d+(\\.\\d+)?'
def __init__(self, *args, **kwargs):
FloatConverter.__init__(self, *args, **kwargs)
class UriConverter(PathConverter):
"""First part of the uri"""
type_ = str
def __init__(self, *args, **kwargs):
PathConverter.__init__(self, *args, **kwargs)
class IdConverter(BaseConverter):
"""Id of the object you want to query"""
type_ = str
def __init__(self, *args, **kwargs):
BaseConverter.__init__(self, *args, **kwargs)
class V1Routing(AModule):
def __init__(self, api, name):
super(V1Routing, self).__init__(api, name,
description='Current version of navitia API',
status='current',
index_endpoint='index')
def setup(self):
self.api.app.url_map.converters['region'] = RegionConverter
self.api.app.url_map.converters['lon'] = LonConverter
self.api.app.url_map.converters['lat'] = LatConverter
self.api.app.url_map.converters['uri'] = UriConverter
self.api.app.url_map.converters['id'] = IdConverter
self.api.app.url_map.strict_slashes = False
self.module_resources_manager.register_resource(Index.Index())
self.add_resource(Index.Index,
'/',
'',
endpoint='index')
self.module_resources_manager.register_resource(Index.TechnicalStatus())
self.add_resource(Index.TechnicalStatus,
'/status',
endpoint='technical_status')
lon_lat = '<lon:lon>;<lat:lat>/'
coverage = '/coverage/'
region = coverage + '<region:region>/'
coord = coverage + lon_lat
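        # Illustrative resulting URL prefixes (example values assumed):
        #   region -> /coverage/<region>/     e.g. /coverage/fr-idf/places
        #   coord  -> /coverage/<lon>;<lat>/  e.g. /coverage/2.37;48.85/places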
self.add_resource(Coverage.Coverage,
coverage,
region,
coord,
endpoint='coverage')
self.add_resource(Coord.Coord,
'/coord/' + lon_lat,
'/coords/' + lon_lat,
endpoint='coord')
collecs = list(converters_collection_type.collections_to_resource_type.keys())
for collection in collecs:
# we want to hide the connections apis, as they are only for debug
hide = collection == 'connections'
self.add_resource(getattr(Uri, collection)(True),
region + collection,
coord + collection,
region + '<uri:uri>/' + collection,
coord + '<uri:uri>/' + collection,
endpoint=collection + '.collection', hide=hide)
if collection == 'connections':
# connections api cannot be query by id
continue
self.add_resource(getattr(Uri, collection)(False),
region + collection + '/<id:id>',
coord + collection + '/<id:id>',
region + '<uri:uri>/' + collection + '/<id:id>',
coord + '<uri:uri>/' + collection + '/<id:id>',
endpoint=collection + '.id', hide=hide)
collecs = ["routes", "lines", "line_groups", "networks", "stop_areas", "stop_points",
"vehicle_journeys"]
for collection in collecs:
self.add_resource(getattr(Uri, collection)(True),
'/' + collection,
endpoint=collection + '.external_codes')
self.add_resource(Places.Places,
region + 'places',
coord + 'places',
'/places',
endpoint='places')
self.add_resource(Ptobjects.Ptobjects,
region + 'pt_objects',
coord + 'pt_objects',
endpoint='pt_objects')
self.add_resource(Places.PlaceUri,
'/places/<id:id>',
region + 'places/<id:id>',
coord + 'places/<id:id>',
endpoint='place_uri')
self.add_resource(Places.PlacesNearby,
region + 'places_nearby',
coord + 'places_nearby',
region + '<uri:uri>/places_nearby',
coord + '<uri:uri>/places_nearby',
'/coord/' + lon_lat + 'places_nearby',
'/coords/' + lon_lat + 'places_nearby',
endpoint='places_nearby')
self.add_resource(Journeys.Journeys,
region + '<uri:uri>/journeys',
coord + '<uri:uri>/journeys',
region + 'journeys',
coord + 'journeys',
'/journeys',
endpoint='journeys',
# we don't want to document those routes as we consider them deprecated
hide_routes=(region + '<uri:uri>/journeys', coord + '<uri:uri>/journeys'))
if app.config['GRAPHICAL_ISOCHRONE']:
self.add_resource(GraphicalIsochrone.GraphicalIsochrone,
region + 'isochrones',
endpoint='isochrones')
if app.config.get('HEAT_MAP'):
self.add_resource(HeatMap.HeatMap,
region + 'heat_maps',
endpoint='heat_maps')
self.add_resource(Schedules.RouteSchedules,
region + '<uri:uri>/route_schedules',
coord + '<uri:uri>/route_schedules',
'/route_schedules',
endpoint='route_schedules')
self.add_resource(Schedules.NextArrivals,
region + '<uri:uri>/arrivals',
coord + '<uri:uri>/arrivals',
region + 'arrivals',
coord + 'arrivals',
endpoint='arrivals')
self.add_resource(Schedules.NextDepartures,
region + '<uri:uri>/departures',
coord + '<uri:uri>/departures',
region + 'departures',
coord + 'departures',
endpoint='departures')
self.add_resource(Schedules.StopSchedules,
region + '<uri:uri>/stop_schedules',
coord + '<uri:uri>/stop_schedules',
'/stop_schedules',
endpoint='stop_schedules')
self.add_resource(Disruptions.TrafficReport,
region + 'traffic_reports',
region + '<uri:uri>/traffic_reports',
endpoint='traffic_reports')
self.add_resource(LineReports.LineReports,
region + 'line_reports',
region + '<uri:uri>/line_reports',
endpoint='line_reports')
self.add_resource(Status.Status,
region + 'status',
endpoint='status')
self.add_resource(GeoStatus.GeoStatus,
region + '_geo_status',
endpoint='geo_status')
self.add_resource(Calendars.Calendars,
region + 'calendars',
region + '<uri:uri>/calendars',
region + "calendars/<id:id>",
endpoint="calendars")
self.add_resource(JSONSchema.Schema,
'/schema',
endpoint="schema")
| agpl-3.0 | -5,484,635,130,334,410,000 | 40.477912 | 100 | 0.512006 | false |
Tocknicsu/nctuoj | backend/service/verdict.py | 1 | 5475 | from service.base import BaseService
from req import Service
from utils.form import form_validation
import os
import shutil
import config
class VerdictService(BaseService):
def __init__(self, db, rs):
super().__init__(db, rs)
VerdictService.inst = self
def get_verdict_list(self, data={}):
# res = self.rs.get('verdict_list')
# if res: return (None, res)
required_args = [{
'name': 'problem_id',
'type': int,
}]
err = form_validation(data, required_args)
if err: return (err, None)
sql = "SELECT v.*, u.account as setter_user FROM verdicts as v, users as u WHERE v.setter_user_id=u.id"
param = tuple()
if 'problem_id' in data and data['problem_id']:
sql += ' AND (v.problem_id=%s OR v.problem_id=0)'
param = (data['problem_id'],)
res = yield self.db.execute(sql, param)
res = res.fetchall()
# self.rs.set('verdict_list', res)
return (None, res)
def get_verdict_type(self):
# res = self.rs.get('verdict_type')
# if res: return (None, res)
res = { x['id']: x for x in (yield self.db.execute("SELECT * FROM map_verdict_string order by id"))}
# self.rs.set('verdict_type', res)
return (None, res)
def get_verdict(self, data={}):
required_args = [{
'name': '+id',
'type': int,
}]
err = form_validation(data, required_args)
if err: return (err, None)
if int(data['id']) == 0:
col = ['id', 'title', 'execute_type_id', 'execute_type_id', 'file_name', 'setter_user_id']
res = {x: '' for x in col}
res['id'] = 0
return (None, res)
# res = self.rs.get('verdict@%s'%str(data['id']))
# if res: return (None, res)
res = yield self.db.execute('SELECT v.*, u.account as setter_user FROM verdicts as v, users as u WHERE v.id=%s AND v.setter_user_id=u.id;', (data['id'],))
if res.rowcount == 0:
return ((404, 'No Verdict ID'), None)
res = res.fetchone()
err, res['execute_type'] = yield from Service.Execute.get_execute({'id': res['execute_type_id']})
folder = '%s/data/verdicts/%s/' % (config.DATAROOT, str(res['id']))
file_path = '%s/%s' % (folder, res['file_name'])
try: os.makedirs(folder)
except: pass
with open(file_path) as f:
res['code'] = f.read()
res['code_line'] = len(open(file_path).readlines())
# self.rs.set('verdict@%s'%(str(data['id'])), res)
return (None, res)
def post_verdict(self ,data={}):
required_args = [{
'name': '+title',
'type': str,
}, {
'name': '+execute_type_id',
'type': int,
}, {
'name': '+setter_user_id',
'type': int,
}, {
'name': '+code_file',
}]
err = form_validation(data, required_args)
if err: return (err, None)
code_file = None
if data['code_file'] is None:
return ((400, 'No code file'), None)
data['file_name'] = data['code_file']['filename']
code_file = data.pop('code_file')
sql, param = self.gen_insert_sql('verdicts', data)
id = (yield self.db.execute(sql, param)).fetchone()['id']
if code_file:
folder = '%s/data/verdicts/%s/' % (config.DATAROOT, str(id))
file_path = '%s/%s' % (folder, data['file_name'])
try: shutil.rmtree(folder)
except: pass
try: os.makedirs(folder)
except: pass
with open(file_path, 'wb+') as f:
f.write(code_file['body'])
return (None, str(id))
def put_verdict(self ,data={}):
required_args = [{
'name': '+id',
'type': int,
}, {
'name': '+title',
'type': str,
}, {
'name': '+execute_type_id',
'type': int,
}, {
'name': '+setter_user_id',
'type': int,
}, {
'name': 'code_file',
}]
err = form_validation(data, required_args)
if err: return (err, None)
code_file = data.pop('code_file')
if code_file: data['file_name'] = code_file['filename']
sql, param = self.gen_update_sql('verdicts', data)
id = data.pop('id')
yield self.db.execute(sql+' WHERE id=%s;', param+(id,))
if code_file:
folder = '%s/data/verdicts/%s/' % (config.DATAROOT, str(id))
file_path = '%s/%s' % (folder, data['file_name'])
try: shutil.rmtree(folder)
except: pass
try: os.makedirs(folder)
except: pass
with open(file_path, 'wb+') as f:
f.write(code_file['body'])
# self.rs.delete('verdict@%s'%(str(id)))
# self.rs.delete('verdict_list')
return (None, str(id))
def delete_verdict(self, data={}):
required_args = [{
'name': '+id',
'type': int,
}]
err = form_validation(data, required_args)
if err: return (err, None)
yield self.db.execute('DELETE FROM verdicts WHERE id=%s;', (data['id'],))
# self.rs.delete('verdict_list')
# self.rs.delete('verdict@%s'%(str(data['id'])))
return (None, str(data['id']))
| mit | -6,884,955,607,823,547,000 | 35.019737 | 162 | 0.498265 | false |
gynvael/stream | 017-osdev-06/build.py | 1 | 1266 | #!/usr/bin/python
import os
import subprocess
def fix_stage1_size():
stage2_size = os.stat("stage2").st_size
kernel_size = os.stat("kernel64").st_size
stage2_size = (stage2_size + kernel_size + 511) / 512
if stage2_size >= 255:
raise Exception("stage2 & kernel are too large")
with open("stage1", "rb+") as f:
d = f.read()
idx = d.index("\xb0\xcc\x90\x90")
d = bytearray(d)
d[idx+1] = stage2_size
f.seek(0)
f.write(d)
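# Illustrative effect (assumed sizes): if stage2 plus kernel64 occupy 17
# sectors, the byte after the 0xB0 opcode (mov al, imm8) in stage1 is patched
# from the 0xCC placeholder to 17, presumably the sector count stage1 loads.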
cc_flags = "-std=c99 -nostdlib -o kernel64 -O3 -Wall -Wextra -masm=intel"
cmds_to_run = [
"gcc kernel.c " + cc_flags,
"strip kernel64",
"nasm stage1.asm",
"nasm stage2.asm",
fix_stage1_size
]
files_to_img = [
"stage1",
"stage2",
"kernel64"
]
for cmd in cmds_to_run:
if type(cmd) is str:
print "Running:", cmd
print subprocess.check_output(cmd, shell=True)
else:
print "Calling:", cmd.func_name
cmd()
buf = []
for fn in files_to_img:
with open(fn, "rb") as f:
d = f.read()
buf.append(d)
if len(d) % 512 == 0:
continue
padding_size = 512 - len(d) % 512
buf.append("\0" * padding_size);
with open("floppy.bin", "wb") as f:
f.write(''.join(buf))
| mit | -4,116,388,021,985,915,400 | 19.1 | 73 | 0.554502 | false |
googleapis/python-dialogflow-cx | google/cloud/dialogflowcx_v3/types/entity_type.py | 1 | 13987 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dialogflow.cx.v3",
manifest={
"EntityType",
"ListEntityTypesRequest",
"ListEntityTypesResponse",
"GetEntityTypeRequest",
"CreateEntityTypeRequest",
"UpdateEntityTypeRequest",
"DeleteEntityTypeRequest",
},
)
class EntityType(proto.Message):
r"""Entities are extracted from user input and represent parameters that
are meaningful to your application. For example, a date range, a
proper name such as a geographic location or landmark, and so on.
Entities represent actionable data for your application.
When you define an entity, you can also include synonyms that all
map to that entity. For example, "soft drink", "soda", "pop", and so
on.
There are three types of entities:
- **System** - entities that are defined by the Dialogflow API for
common data types such as date, time, currency, and so on. A
system entity is represented by the ``EntityType`` type.
- **Custom** - entities that are defined by you that represent
actionable data that is meaningful to your application. For
example, you could define a ``pizza.sauce`` entity for red or
white pizza sauce, a ``pizza.cheese`` entity for the different
types of cheese on a pizza, a ``pizza.topping`` entity for
different toppings, and so on. A custom entity is represented by
the ``EntityType`` type.
- **User** - entities that are built for an individual user such as
favorites, preferences, playlists, and so on. A user entity is
represented by the
[SessionEntityType][google.cloud.dialogflow.cx.v3.SessionEntityType]
type.
For more information about entity types, see the `Dialogflow
documentation <https://cloud.google.com/dialogflow/docs/entities-overview>`__.
Attributes:
name (str):
The unique identifier of the entity type. Required for
[EntityTypes.UpdateEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.UpdateEntityType].
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``.
display_name (str):
Required. The human-readable name of the
entity type, unique within the agent.
kind (google.cloud.dialogflowcx_v3.types.EntityType.Kind):
Required. Indicates the kind of entity type.
auto_expansion_mode (google.cloud.dialogflowcx_v3.types.EntityType.AutoExpansionMode):
Indicates whether the entity type can be
automatically expanded.
entities (Sequence[google.cloud.dialogflowcx_v3.types.EntityType.Entity]):
The collection of entity entries associated
with the entity type.
excluded_phrases (Sequence[google.cloud.dialogflowcx_v3.types.EntityType.ExcludedPhrase]):
Collection of exceptional words and phrases that shouldn't
be matched. For example, if you have a size entity type with
entry ``giant``\ (an adjective), you might consider adding
``giants``\ (a noun) as an exclusion. If the kind of entity
type is ``KIND_MAP``, then the phrases specified by entities
and excluded phrases should be mutually exclusive.
enable_fuzzy_extraction (bool):
Enables fuzzy entity extraction during
classification.
redact (bool):
Indicates whether parameters of the entity
type should be redacted in log. If redaction is
enabled, page parameters and intent parameters
referring to the entity type will be replaced by
parameter name when logging.
"""
class Kind(proto.Enum):
r"""Represents kinds of entities."""
KIND_UNSPECIFIED = 0
KIND_MAP = 1
KIND_LIST = 2
KIND_REGEXP = 3
class AutoExpansionMode(proto.Enum):
r"""Represents different entity type expansion modes. Automated
expansion allows an agent to recognize values that have not been
explicitly listed in the entity (for example, new kinds of
shopping list items).
"""
AUTO_EXPANSION_MODE_UNSPECIFIED = 0
AUTO_EXPANSION_MODE_DEFAULT = 1
class Entity(proto.Message):
r"""An **entity entry** for an associated entity type.
Attributes:
value (str):
Required. The primary value associated with this entity
entry. For example, if the entity type is *vegetable*, the
value could be *scallions*.
For ``KIND_MAP`` entity types:
- A canonical value to be used in place of synonyms.
For ``KIND_LIST`` entity types:
- A string that can contain references to other entity
types (with or without aliases).
synonyms (Sequence[str]):
Required. A collection of value synonyms. For example, if
the entity type is *vegetable*, and ``value`` is
*scallions*, a synonym could be *green onions*.
For ``KIND_LIST`` entity types:
- This collection must contain exactly one synonym equal to
``value``.
"""
value = proto.Field(proto.STRING, number=1,)
synonyms = proto.RepeatedField(proto.STRING, number=2,)
class ExcludedPhrase(proto.Message):
r"""An excluded entity phrase that should not be matched.
Attributes:
value (str):
Required. The word or phrase to be excluded.
"""
value = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
kind = proto.Field(proto.ENUM, number=3, enum=Kind,)
auto_expansion_mode = proto.Field(proto.ENUM, number=4, enum=AutoExpansionMode,)
entities = proto.RepeatedField(proto.MESSAGE, number=5, message=Entity,)
excluded_phrases = proto.RepeatedField(
proto.MESSAGE, number=6, message=ExcludedPhrase,
)
enable_fuzzy_extraction = proto.Field(proto.BOOL, number=7,)
redact = proto.Field(proto.BOOL, number=9,)
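# Illustrative construction (assumed, proto-plus keyword style):
#   EntityType(
#       display_name="size",
#       kind=EntityType.Kind.KIND_MAP,
#       entities=[EntityType.Entity(value="small", synonyms=["small", "petite"])],
#   )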
class ListEntityTypesRequest(proto.Message):
r"""The request message for
[EntityTypes.ListEntityTypes][google.cloud.dialogflow.cx.v3.EntityTypes.ListEntityTypes].
Attributes:
parent (str):
Required. The agent to list all entity types for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
language_code (str):
The language to list entity types for. The following fields
are language dependent:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
page_size (int):
The maximum number of items to return in a
single page. By default 100 and at most 1000.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(proto.STRING, number=1,)
language_code = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListEntityTypesResponse(proto.Message):
r"""The response message for
[EntityTypes.ListEntityTypes][google.cloud.dialogflow.cx.v3.EntityTypes.ListEntityTypes].
Attributes:
entity_types (Sequence[google.cloud.dialogflowcx_v3.types.EntityType]):
The list of entity types. There will be a maximum number of
items returned based on the page_size field in the request.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
entity_types = proto.RepeatedField(proto.MESSAGE, number=1, message="EntityType",)
next_page_token = proto.Field(proto.STRING, number=2,)
class GetEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.GetEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.GetEntityType].
Attributes:
name (str):
Required. The name of the entity type. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``.
language_code (str):
The language to retrieve the entity type for. The following
fields are language dependent:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
"""
name = proto.Field(proto.STRING, number=1,)
language_code = proto.Field(proto.STRING, number=2,)
class CreateEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.CreateEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.CreateEntityType].
Attributes:
parent (str):
Required. The agent to create a entity type for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
entity_type (google.cloud.dialogflowcx_v3.types.EntityType):
Required. The entity type to create.
language_code (str):
The language of the following fields in ``entity_type``:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
"""
parent = proto.Field(proto.STRING, number=1,)
entity_type = proto.Field(proto.MESSAGE, number=2, message="EntityType",)
language_code = proto.Field(proto.STRING, number=3,)
class UpdateEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.UpdateEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.UpdateEntityType].
Attributes:
entity_type (google.cloud.dialogflowcx_v3.types.EntityType):
Required. The entity type to update.
language_code (str):
The language of the following fields in ``entity_type``:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The mask to control which fields get updated.
"""
entity_type = proto.Field(proto.MESSAGE, number=1, message="EntityType",)
language_code = proto.Field(proto.STRING, number=2,)
update_mask = proto.Field(
proto.MESSAGE, number=3, message=field_mask_pb2.FieldMask,
)
class DeleteEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.DeleteEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.DeleteEntityType].
Attributes:
name (str):
Required. The name of the entity type to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``.
force (bool):
This field has no effect for entity type not being used. For
entity types that are used by intents or pages:
- If ``force`` is set to false, an error will be returned
with message indicating the referencing resources.
- If ``force`` is set to true, Dialogflow will remove the
entity type, as well as any references to the entity type
(i.e. Page
[parameter][google.cloud.dialogflow.cx.v3.Form.Parameter]
of the entity type will be changed to '@sys.any' and
intent
[parameter][google.cloud.dialogflow.cx.v3.Intent.Parameter]
of the entity type will be removed).
"""
name = proto.Field(proto.STRING, number=1,)
force = proto.Field(proto.BOOL, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 1,964,519,015,032,665,000 | 40.017595 | 109 | 0.645028 | false |
opennode/nodeconductor | waldur_core/cost_tracking/tasks.py | 1 | 2343 | from celery import shared_task
from waldur_core.cost_tracking import CostTrackingRegister, models
from waldur_core.structure import models as structure_models
@shared_task(name='waldur_core.cost_tracking.recalculate_estimate')
def recalculate_estimate(recalculate_total=False):
""" Recalculate price of consumables that were used by resource until now.
Regular task. It is too expensive to calculate consumed price on each
request, so we store cached price each hour.
If recalculate_total is True - task also recalculates total estimate
for current month.
"""
# Celery does not import server.urls and does not discover cost tracking modules.
# So they should be discovered implicitly.
CostTrackingRegister.autodiscover()
# Step 1. Recalculate resources estimates.
for resource_model in CostTrackingRegister.registered_resources:
for resource in resource_model.objects.all():
_update_resource_consumed(resource, recalculate_total=recalculate_total)
# Step 2. Move from down to top and recalculate consumed estimate for each
# object based on its children.
ancestors_models = [m for m in models.PriceEstimate.get_estimated_models()
if not issubclass(m, structure_models.ResourceMixin)]
for model in ancestors_models:
for ancestor in model.objects.all():
_update_ancestor_consumed(ancestor)
def _update_resource_consumed(resource, recalculate_total):
price_estimate, created = models.PriceEstimate.objects.get_or_create_current(scope=resource)
if created:
models.ConsumptionDetails.objects.create(price_estimate=price_estimate)
price_estimate.create_ancestors()
price_estimate.update_total()
elif recalculate_total:
price_estimate.update_total()
price_estimate.update_consumed()
def _update_ancestor_consumed(ancestor):
price_estimate, _ = models.PriceEstimate.objects.get_or_create_current(scope=ancestor)
resource_descendants = [descendant for descendant in price_estimate.get_descendants()
if isinstance(descendant.scope, structure_models.ResourceMixin)]
price_estimate.consumed = sum([descendant.consumed for descendant in resource_descendants])
price_estimate.save(update_fields=['consumed'])
| mit | 8,238,126,947,783,257,000 | 47.8125 | 96 | 0.729834 | false |
sgenoud/scikit-learn | examples/tree/plot_overfitting_cv.py | 1 | 2056 | """
====================================================
Comparison of cross validated score with overfitting
====================================================
These two plots compare the cross-validated score of the regression of
a simple function. We see that below the optimal value of 7 leaves the regression is
far from the real function. On the other hand, for a higher number of leaves we
clearly overfit.
"""
print __doc__
import numpy as np
from sklearn import tree
def plot_pruned_path(scores, with_std=True):
"""Plots the cross validated scores versus the number of leaves of trees"""
import matplotlib.pyplot as plt
means = np.array([np.mean(s) for s in scores])
stds = np.array([np.std(s) for s in scores]) / np.sqrt(len(scores[1]))
x = range(len(scores) + 1, 1, -1)
plt.plot(x, means)
if with_std:
plt.plot(x, means + 2 * stds, lw=1, c='0.7')
plt.plot(x, means - 2 * stds, lw=1, c='0.7')
plt.xlabel('Number of leaves')
plt.ylabel('Cross validated score')
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[1::5] += 3 * (0.5 - rng.rand(16))
clf = tree.DecisionTreeRegressor(max_depth=20)
scores = tree.prune_path(clf, X, y, max_n_leaves=20,
n_iterations=100, random_state=0)
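# Note (added for context): prune_path and the n_leaves/prune() API used below come
# from the experimental pruning branch this fork was written against; they are not
# part of mainline scikit-learn releases.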
plot_pruned_path(scores)
clf = tree.DecisionTreeRegressor(max_depth=20, n_leaves=15)
clf.fit(X, y)
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
#Prepare the different pruned level
y_15 = clf.predict(X_test)
clf = clf.prune(6)
y_7 = clf.predict(X_test)
clf = clf.prune(2)
y_2 = clf.predict(X_test)
# Plot the results
import pylab as pl
pl.figure()
pl.scatter(X, y, c="k", label="data")
pl.plot(X_test, y_2, c="g", label="n_leaves=2", linewidth=2)
pl.plot(X_test, y_7, c="b", label="n_leaves=7", linewidth=2)
pl.plot(X_test, y_15, c="r", label="n_leaves=15", linewidth=2)
pl.xlabel("data")
pl.ylabel("target")
pl.title("Decision Tree Regression with levels of pruning")
pl.legend()
pl.show()
| bsd-3-clause | 4,750,765,594,037,181,000 | 27.555556 | 79 | 0.625973 | false |
wodo/WebTool3 | webtool/server/models/event.py | 1 | 20122 | # -*- coding: utf-8 -*-
from django.db import models
from django.template.defaultfilters import date, time
from .reference import Reference
from .equipment import Equipment
from .approximate import Approximate
from .mixins import SeasonMixin, DescriptionMixin
from .time_base import TimeMixin
from . import fields
class EventManager(models.Manager):
def get_by_natural_key(self, season, reference):
reference = Reference.get_reference(reference, season)
return reference.event
class Event(SeasonMixin, TimeMixin, DescriptionMixin, models.Model):
"""
The option (blank=True, default='') for CharField describes an optional element
field == '' => data is not available
field != '' => data is Valid
The option (blank=True, null=True) for the other fields describes an optional element
field is None => data is not available
field is not None => data is Valid
"""
objects = EventManager()
# noinspection PyUnresolvedReferences
reference = models.OneToOneField(
'Reference',
primary_key=True,
verbose_name='Buchungscode',
related_name='event',
on_delete=models.PROTECT,
)
location = fields.LocationField()
reservation_service = models.BooleanField(
'Reservierungswunsch für Schulungsraum',
db_index=True,
blank=True, default=False
)
start_date = models.DateField(
'Abreisetag',
db_index=True
)
start_time = models.TimeField(
'Abreisezeit (Genau)',
blank=True, null=True,
help_text="Je nach Abreisezeit wird eventuell Urlaub benötgit",
)
# approximate is valid only if start_time is None
approximate = models.ForeignKey(
Approximate,
db_index=True,
verbose_name='Abreisezeit (Ungefähr)',
related_name='event_list',
blank=True, null=True,
help_text="Je nach Abreisezeit wird eventuell Urlaub benötigt",
on_delete=models.PROTECT,
)
end_date = models.DateField(
'Rückkehr',
blank=True, null=True,
help_text="Nur wenn die Veranstaltung mehr als einen Tag dauert",
)
end_time = models.TimeField(
'Rückkehrzeit',
blank=True, null=True,
help_text="z.B. Ungefähr bei Touren/Kursen - Genau bei Vorträgen",
)
link = models.URLField(
'Beschreibung',
blank=True, default='',
help_text="Eine URL zur Veranstaltungsbeschreibung auf der Homepage",
)
map = models.FileField(
'Kartenausschnitt',
blank=True, default='',
help_text="Eine URL zu einem Kartenausschnitt des Veranstaltungsgebietes",
)
distal = models.BooleanField(
'Mit gemeinsamer Anreise',
db_index=True,
blank=True, default=False,
)
# rendezvous, source and distance valid only, if distal_event == True
rendezvous = fields.LocationField(
'Treffpunkt',
help_text="Treffpunkt für die Abfahrt z.B. Edelweissparkplatz",
)
source = fields.LocationField(
'Ausgangsort',
help_text="Treffpunkt vor Ort",
)
public_transport = models.BooleanField(
'Öffentliche Verkehrsmittel',
db_index=True,
blank=True, default=False
)
# distance valid only, if public_transport == False
distance = fields.DistanceField()
# lea valid only, if public_transport == True
lea = models.BooleanField(
'Low Emission Adventure',
db_index=True,
blank=True, default=False
)
new = models.BooleanField(
'Markierung für Neue Veranstaltungen',
db_index=True,
blank=True, default=False
)
shuttle_service = models.BooleanField(
'Reservierungswunsch für AlpinShuttle',
db_index=True,
blank=True, default=False
)
# check event.season == instruction.topic.season
# noinspection PyUnresolvedReferences
instruction = models.ForeignKey(
'Instruction',
db_index=True,
blank=True, null=True,
verbose_name='Kurs',
related_name='meeting_list',
on_delete=models.PROTECT,
)
def natural_key(self):
return self.season.name, str(self.reference)
natural_key.dependencies = ['server.season', 'server.reference']
def __str__(self):
if hasattr(self, 'meeting') and not self.meeting.is_special:
title = self.meeting.topic.title
else:
title = self.title
return "{} - {}, {} [{}]".format(self.reference, title, self.long_date(with_year=True), self.season.name)
def long_date(self, with_year=False, with_time=False):
"""
:param with_year: False
5. September
22. bis 25. Januar
28. Mai bis 3. Juni
30. Dezember 2016 bis 6. Januar 2017
:param with_year: True
5. September 2016
22. bis 25. Januar 2016
28. Mai bis 3. Juni 2016
30. Dezember 2016 bis 6. Januar 2017
:return: long formatted date
"""
y = ' Y' if with_year else ''
if self.end_date is None or self.start_date == self.end_date:
value = date(self.start_date, "j. F" + y)
if with_time and self.start_time:
if self.end_time is None or self.start_time == self.end_time:
if self.start_time.minute:
if self.start_time.minute < 10:
minute = time(self.start_time, "i")[1:]
else:
minute = time(self.start_time, "i")
value = "{}, {}.{}".format(value, time(self.start_time, "G"), minute)
else:
value = "{}, {}".format(value, time(self.start_time, "G"))
else:
if self.end_time.minute:
if self.start_time.minute < 10:
minute = time(self.start_time, "i")[1:]
else:
minute = time(self.start_time, "i")
value = "{}, {}.{}".format(value, time(self.start_time, "G"), minute)
else:
value = "{} bis {}".format(value, time(self.start_time, "G"))
value = "{} Uhr".format(value)
return value
elif self.start_date.month == self.end_date.month and self.start_date.year == self.end_date.year:
return "{0} bis {1}".format(date(self.start_date, "j."), date(self.end_date, "j. F" + y))
elif self.start_date.month != self.end_date.month:
y0 = ''
if self.start_date.year != self.end_date.year:
y0 = y = ' Y'
return "{0} bis {1}".format(date(self.start_date, "j. F" + y0), date(self.end_date, "j. F" + y))
def short_date(self, with_year=False):
"""
:param with_year: False
05.09.
22.01 - 25.01.
28.05. - 03.06.
:param with_year: True
05.09.2016
22.01.2016 - 25.01.2016
28.05.2016 - 03.06.2016
:return: short formatted date
"""
y = 'Y' if with_year else ''
if self.end_date is None or self.start_date == self.end_date:
return date(self.start_date, "d.m." + y)
return "{0} - {1}".format(date(self.start_date, "d.m." + y), date(self.end_date, "d.m." + y))
def departure(self):
"""
{start_date}, {start_time}, {rendezvous}, Heimkehr am {end_date} gegen {end_time} Uhr
"""
season_year = int(self.season.name)
with_year = season_year != self.start_date.year or (self.end_date and season_year != self.end_date.year)
y = 'Y' if with_year else ''
start_date = date(self.start_date, "j.n." + y)
if self.start_time:
if self.start_time.minute:
start_time = time(self.start_time, "G.i")
else:
start_time = time(self.start_time, "G")
start_time = "{} Uhr".format(start_time)
else:
start_time = self.approximate.name if self.approximate else ''
if self.end_date and self.end_date != self.start_date:
end_date = date(self.end_date, "j.n." + y)
else:
end_date = ''
if self.end_time:
if self.end_time.minute:
end_time = time(self.end_time, "G.i")
else:
end_time = time(self.end_time, "G")
else:
end_time = ''
departure = "{}, {}".format(start_date, start_time)
if self.rendezvous:
departure = "{}, {}".format(departure, self.rendezvous)
if end_time:
departure = "{}, Heimkehr".format(departure)
if end_date:
departure = "{} am {}".format(departure, end_date)
departure = "{} gegen {} Uhr".format(departure, end_time)
return departure
def appointment(self):
"""
{start_date}, {start_time} Uhr, {name}, {location}, {rendezvous}
{start_date}, {start_time} bis {end_time} Uhr, {name}, {location}, {rendezvous}
{start_date}, {start_time} Uhr bis {end_date}, {end_time} Uhr, {name}, {location}, {rendezvous}
{start_date}, {start_time} Uhr bis {end_date}, {name}, {location}, {rendezvous}
"""
appointment = ''
season_year = int(self.season.name)
with_year = season_year != self.start_date.year or (self.end_date and season_year != self.end_date.year)
y = 'Y' if with_year else ''
start_date = date(self.start_date, "j.n." + y)
end_date = date(self.end_date, "j.n." + y) if self.end_date else ''
approximate = ''
if self.start_time:
if self.start_time.minute:
start_time = time(self.start_time, "G.i")
else:
start_time = time(self.start_time, "G")
elif self.approximate:
start_time = ''
approximate = self.approximate.name
else:
start_time = ''
if self.end_time:
if self.end_time.minute:
end_time = time(self.end_time, "G.i")
else:
end_time = time(self.end_time, "G")
else:
end_time = ''
if start_time:
appointment = "{}, {}".format(start_date, start_time)
if end_time:
if end_date:
appointment = "{} Uhr bis {}, {} Uhr".format(appointment, end_date, end_time)
else:
appointment = "{} bis {} Uhr".format(appointment, end_time)
else:
appointment = "{} Uhr".format(appointment)
if approximate:
appointment = "{}, {}".format(start_date, approximate)
if self.name:
appointment = "{}, {}".format(appointment, self.name)
if self.location:
appointment = "{}, {}".format(appointment, self.location)
if self.rendezvous:
appointment = "{}, {}".format(appointment, self.rendezvous)
return appointment
def prefixed_date(self, prefix, formatter, with_year=False):
"""
Beispiel: "Anmeldung bis 10.03."
:param prefix:
:param formatter: a unbound methode like short_date or long_date
:param with_year:
:return:
"""
return "{} {}".format(prefix, formatter(self, with_year))
@property
def activity(self):
if hasattr(self, 'tour') and self.tour:
return "tour"
if hasattr(self, 'talk') and self.talk:
return "talk"
if hasattr(self, 'meeting') and self.meeting:
return "topic"
if hasattr(self, 'session') and self.session:
return "collective"
@property
def division(self):
winter = self.reference.category.winter
summer = self.reference.category.summer
indoor = self.reference.category.climbing
if winter and not summer and not indoor:
return "winter"
elif not winter and summer and not indoor:
return "summer"
elif not winter and not summer and indoor:
return "indoor"
else:
return "misc"
@property
def state(self):
if hasattr(self, 'tour') and self.tour:
state = self.tour.state
elif hasattr(self, 'talk') and self.talk:
state = self.talk.state
elif hasattr(self, 'meeting') and self.meeting:
state = self.meeting.state
elif hasattr(self, 'session') and self.session:
state = self.session.state
else:
return None
if state:
if state.done:
return "done"
if state.moved:
return "moved"
if state.canceled:
return "canceled"
if state.unfeasible:
return "unfeasible"
if state.public:
return "public"
else:
return "private"
@property
def quantity(self):
if hasattr(self, 'tour') and self.tour:
min_quantity = self.tour.min_quantity
max_quantity = self.tour.max_quantity
cur_quantity = self.tour.cur_quantity
elif hasattr(self, 'talk') and self.talk:
min_quantity = self.talk.min_quantity
max_quantity = self.talk.max_quantity
cur_quantity = self.talk.cur_quantity
elif hasattr(self, 'meeting') and self.meeting:
min_quantity = self.meeting.min_quantity
max_quantity = self.meeting.max_quantity
cur_quantity = self.meeting.cur_quantity
else:
return None
return {
"min": min_quantity,
"max": max_quantity,
"current": cur_quantity
}
@property
def admission(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.admission
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.admission
if hasattr(self, 'talk') and self.talk:
return self.talk.admission
@property
def extra_charges(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.extra_charges
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.extra_charges
@property
def extra_charges_info(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.extra_charges_info
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.extra_charges_info
@property
def advances(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.advances
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.advances
@property
def advances_info(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.advances_info
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.advances_info
@property
def speaker(self):
if hasattr(self, 'talk') and self.talk:
return self.talk.speaker
if hasattr(self, 'session') and self.session:
return self.session.speaker
return None
@property
def guide(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.guide
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.guide
if hasattr(self, 'session') and self.session:
return self.session.guide
@property
def guides(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.guides()
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.guides()
if hasattr(self, 'session') and self.session:
return self.session.guides()
@property
def skill(self):
skill = None
if hasattr(self, 'tour') and self.tour:
skill = self.tour.skill
if hasattr(self, 'session') and self.session:
skill = self.session.skill if skill.code != "x" else None
return skill.order if skill else None
@property
def fitness(self):
fitness = None
if hasattr(self, 'tour') and self.tour:
fitness = self.tour.fitness
if hasattr(self, 'session') and self.session:
fitness = self.session.fitness if fitness.code != "x" else None
return fitness.order if fitness else None
@property
def ladies_only(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.ladies_only
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.ladies_only
if hasattr(self, 'session') and self.session:
return self.session.ladies_only
return False
@property
def youth_on_tour(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.youth_on_tour
return False
@property
def preconditions(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.preconditions
if hasattr(self, 'meeting') and self.meeting:
if self.meeting.is_special:
return self.meeting.preconditions
else:
return self.meeting.topic.preconditions
return None
@property
def equipments(self):
equipments = Equipment.objects.none()
misc = ''
if hasattr(self, 'tour') and self.tour:
equipments = self.tour.equipments
misc = self.tour.misc_equipment
if hasattr(self, 'meeting') and self.meeting:
if self.meeting.is_special:
equipments = self.meeting.equipments
misc = self.meeting.misc_equipment
else:
equipments = self.meeting.topic.equipments
misc = self.meeting.topic.misc_equipment
if hasattr(self, 'session') and self.session:
equipments = self.session.equipments
misc = self.session.misc_equipment
equipment_list = []
for equipment in equipments.all():
equipment_list.append(dict(code=equipment.code, name=equipment.name))
equipments = {}
if equipment_list:
equipments.update(dict(list=equipment_list))
if misc:
equipments.update(dict(misc=misc))
return equipments if equipments else None
@property
def team(self):
team = None
if hasattr(self, 'tour') and self.tour:
team = self.tour.team
if hasattr(self, 'meeting') and self.meeting:
team = self.meeting.team
if hasattr(self, 'session') and self.session:
team = self.session.team
return ', '.join(team.values_list('user__username', flat=True)) if team else None
@property
def subject(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.subject()
if hasattr(self, 'talk') and self.talk:
return self.talk.subject()
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.subject()
if hasattr(self, 'session') and self.session:
return self.session.subject()
@property
def details(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.details()
if hasattr(self, 'talk') and self.talk:
return self.talk.details()
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.details()
if hasattr(self, 'session') and self.session:
return self.session.details()
class Meta:
get_latest_by = "updated"
verbose_name = "Veranstaltungstermin"
verbose_name_plural = "Veranstaltungstermine"
ordering = ('start_date', )
| bsd-2-clause | -9,152,673,757,879,513,000 | 32.855219 | 113 | 0.564147 | false |
deepmind/open_spiel | open_spiel/python/bots/is_mcts_test.py | 1 | 2548 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for Information Set MCTS bot.
This test mimics the basic C++ tests in algorithms/is_mcts_test.cc.
"""
# pylint: disable=g-unreachable-test-method
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import evaluate_bots
import pyspiel
SEED = 12983641
class ISMCTSBotTest(absltest.TestCase):
def ismcts_play_game(self, game):
evaluator = pyspiel.RandomRolloutEvaluator(1, SEED)
for final_policy_type in [
pyspiel.ISMCTSFinalPolicyType.NORMALIZED_VISIT_COUNT,
pyspiel.ISMCTSFinalPolicyType.MAX_VISIT_COUNT,
pyspiel.ISMCTSFinalPolicyType.MAX_VALUE
]:
bot = pyspiel.ISMCTSBot(SEED, evaluator, 5.0, 1000, -1, final_policy_type,
False, False)
bots = [bot] * game.num_players()
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
bot = pyspiel.ISMCTSBot(SEED, evaluator, 5.0, 1000, 10, final_policy_type,
False, False)
bots = [bot] * game.num_players()
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
bot = pyspiel.ISMCTSBot(SEED, evaluator, 5.0, 1000, 10, final_policy_type,
True, True)
bots = [bot] * game.num_players()
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
def test_basic_sim_kuhn(self):
game = pyspiel.load_game("kuhn_poker")
self.ismcts_play_game(game)
game = pyspiel.load_game("kuhn_poker(players=3)")
self.ismcts_play_game(game)
def test_basic_sim_leduc(self):
game = pyspiel.load_game("leduc_poker")
self.ismcts_play_game(game)
game = pyspiel.load_game("leduc_poker(players=3)")
self.ismcts_play_game(game)
if __name__ == "__main__":
np.random.seed(SEED)
absltest.main()
| apache-2.0 | 1,481,365,194,119,847,400 | 35.4 | 80 | 0.68956 | false |
googleinterns/contextual-adjectives | generate_noun_to_adj_list/noun_to_adj_gen.py | 1 | 4308 | """Code to generate noun to adjective dictionary"""
import nltk
from nltk.tokenize.treebank import TreebankWordTokenizer
from bert_setup import Bert
class NounToAdjGen:
"""Add adjectives for nouns in dictionary noun_to_adj.
Attributes:
noun_to_adj : Noun to adjective dictionary.
tokenizer : An instance of nltk's tokenizer.
bert_model : An instance of class bert.
adj_tags : Tags of adjectives in nltk.
noun_tags : Tags of nouns in nltk.
noun_list : List of nouns that we are working on.
adj_list : List of adjectives that we are working on.
"""
def __init__(self, noun_list, adj_list):
"""Initializing noun to adjective dictionary."""
self.noun_to_adj = {}
for noun in noun_list:
self.noun_to_adj[noun] = []
# Use nltk treebank tokenizer
self.tokenizer = TreebankWordTokenizer()
# Initializing the bert class
self.bert_model = Bert()
# https://pythonprogramming.net/natural-language-toolkit-nltk-part-speech-tagging/
self.adj_tags = ['JJ', 'JJR', 'JJS']
self.noun_tags = ['NN', 'NNS', 'NNP', 'NNPS']
self.noun_list = noun_list
self.adj_list = adj_list
def add_to_dictionary(self, sentences, num_of_perturb):
"""Add adjectives for nouns by perturbing sentence to noun_to_adj.
Args:
sentences : The list of sentences for which to look up for nouns and adjs.
num_of_perturb : Number of perturbations you want to make for a word in a sentence
"""
for sent in sentences:
# Tokenizing and POS tagging the sentence
pos_inf = nltk.tag.pos_tag(self.tokenizer.tokenize(sent))
for idx, (word, tag) in enumerate(pos_inf):
word = word.lower()
if tag in self.noun_tags and word in self.noun_list:
valid_adj_index = []
if idx != 0:
valid_adj_index.append(idx-1)
if idx != (len(pos_inf)-1):
valid_adj_index.append(idx+1)
for adj_index in valid_adj_index:
word1, tag1 = pos_inf[adj_index]
word1 = word1.lower()
if tag1 in self.adj_tags and word1 in self.adj_list:
self.add_adjectives(sent, num_of_perturb, adj_index, word)
self.add_nouns(sent, num_of_perturb, idx, word1)
elif tag1 in self.adj_tags:
self.add_adjectives(sent, num_of_perturb, adj_index, word)
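    # Usage sketch (illustrative only; the word lists and sentence are made up):
    #
    #   gen = NounToAdjGen(noun_list=['dog'], adj_list=['big', 'small'])
    #   gen.add_to_dictionary(['The big dog barked.'], num_of_perturb=10)
    #   gen.noun_to_adj['dog']  # -> [(adjective, score), ...] suggested by BERT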
def add_adjectives(self, sent, num_of_perturb, adj_index, word):
"""Ask bert for suggestions for more adjectives and add their intersection
with adjectives list to the dictionary.
Args:
            sent : The sentence in which to use BERT to find more adjectives.
            num_of_perturb : Number of perturbations you want to make for a word in a sentence
            adj_index : The index of the word to be perturbed in the sentence.
word : The noun for which we are looking for adjectives
"""
token_score = self.bert_model.perturb_bert(sent, num_of_perturb, adj_index)
new_words = list(token_score.keys())
intersection = list(set(new_words) & set(self.adj_list))
intersection = [(a, token_score[a]) for a in intersection]
self.noun_to_adj[word].extend(intersection)
def add_nouns(self, sent, num_of_perturb, noun_index, word):
"""Ask bert for suggestions for more nouns and add their intersection with nouns
list to the dictionary.
Args:
            sent : The sentence in which to use BERT to find more nouns.
            num_of_perturb : Number of perturbations you want to make for a word in a sentence
            noun_index : The index of the word to be perturbed in the sentence.
            word : The adjective to pair with the suggested nouns.
"""
token_score = self.bert_model.perturb_bert(sent, num_of_perturb, noun_index)
new_words = list(token_score.keys())
for n_word in new_words:
if n_word in self.noun_list:
self.noun_to_adj[n_word].append((word, token_score[n_word]))
| apache-2.0 | 7,461,075,055,532,946,000 | 47.41573 | 90 | 0.600743 | false |
globus-labs/ripple | ripple/observers/ipc/ipc_observer.py | 1 | 5264 | import os
import time
import subprocess
import sys
import uuid
import json
import re
from ripple.observers.base_observer import BaseObserver
from ripple import logger, RippleConfig
class IPCObserver(BaseObserver):
"""
Set up the polling IPC monitor. It will use the
"ipcs" command to query for shared memory segments
and will report those that have been created and removed.
"""
def monitor(self):
self.segments = {}
self.poll(True)
while True:
time.sleep(5)
self.poll(False)
def poll(self, start=False):
"""
Use the ipcs command to get memory events and compare
them against active rules.
"""
segments = self.get_segments(start)
events = self.process_segments(segments)
print (events)
# now process the events against rules
for event in events:
self.check_rules(event)
def check_rules(self, event):
"""
        Try to match a rule to this event. If nothing is found, return None.
        Shared memory segments reported by `ipcs` look like this:
key shmid owner perms bytes nattch status
0x00000000 262145 ryan 600 393216 2 dest
"""
logger.debug("Checking rules")
# Iterate through rules and try to apply them
for rule in RippleConfig().rules[:]:
event_type = event['type']
if self.match_condition(event_type, rule):
# Currently putting in pathname as key, need to
# think of a better way to handle "other" information
send_event = {'event': {
'type': event_type,
'size': event['bytes'],
'key': event['key'],
'pathname': event['key'],
'path': event['key'],
'name': event['key'],
'shmid': event['shmid'],
'perms': event['perms'],
'owner': event['owner'],
'status': event['status'],
'uuid': str(uuid.uuid4()),
'hash': 'hashvalue'
}
}
print ("Sending event: %s" % send_event)
send_event.update(rule)
# Now push it down the queue
message = json.dumps(send_event)
RippleConfig().queue.put(message)
logger.debug("Sent data to queue")
return None
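    # Shapes assumed by this method (reconstructed from the code above, not from
    # project documentation):
    #
    #   rule  = {'trigger': {'event': 'Create'}, ...}   # remaining keys are merged into send_event
    #   event = {'type': 'Create', 'key': '0x00000000', 'shmid': '262145', ...}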
def match_condition(self, event_type, rule):
"""
Match the event against a rule's conditions.
"""
logger.debug("Matching rule conditions")
rule_event_type = rule['trigger']['event']
if event_type == rule_event_type:
logger.debug("Matching rule conditions: type MATCHED")
# Hm, might be worth adding perms, owner, status?
return True
return False
def process_segments(self, segments):
"""
Process the segments and return which are new and which have been removed.
"""
previous = dict(self.segments)
new = []
removed = []
for shmid, val in segments.items():
if shmid not in previous.keys():
new.append(val)
# update it in the global dict
self.segments[shmid] = val
else:
# it already existed, so ignore it
del previous[shmid]
for shmid, val in previous.items():
removed.append(val)
del self.segments[shmid]
# now convert these into events
events = []
for e in new:
e['type'] = 'Create'
if 'status' not in e:
e['status'] = 'other'
events.append(e)
for e in removed:
if 'status' not in e:
e['status'] = 'other'
e['type'] = 'Delete'
events.append(e)
return events
def get_segments(self, start=False):
"""
        Use the ipcs command to get and return a dictionary of
segments.
"""
cmd = ["ipcs", "-a"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output, err = process.communicate()
output = output.decode("utf-8").split("\n")
keys = ['key', 'shmid', 'owner', 'perms', 'bytes', 'nattch',
'status']
segments = {}
for line in output:
# this should capture all keys
# note: it won't do queues vs mem vs sem etc.
if line[0:2] == '0x':
values = list(filter(None, line.split(" ")))
data = dict(zip(keys, values))
if start:
# print (data['shmid'])
self.segments[data['shmid']] = data
segments[data['shmid']] = data
return segments
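    # For illustration (values are hypothetical, taken from the docstring example
    # further above): one parsed `ipcs -a` row becomes
    #   {'key': '0x00000000', 'shmid': '262145', 'owner': 'ryan', 'perms': '600',
    #    'bytes': '393216', 'nattch': '2', 'status': 'dest'}
    # keyed by its shmid; rows without a status column simply omit 'status' until
    # process_segments() fills in 'other'.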
def stop_monitoring(self):
"""
Terminate the monitor
"""
logger.debug("Terminating POSIX monitor.")
| apache-2.0 | -8,401,639,659,101,003,000 | 31.695652 | 86 | 0.492781 | false |
fgallina/gpycomplete | gpycomplete/__init__.py | 1 | 1179 | # This file is part of gpycomplete.
# gpycomplete is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# gpycomplete is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with gpycomplete. If not, see <http://www.gnu.org/licenses/>.
# gpycomplete is written from scratch by Fabian Ezequiel Gallina
# <fgallina at gnu dot org dot ar> but it is somehow based on the
# original pycomplete package from the http://python-mode.sf.net.
# gpycomplete allows inline completion and help for the python
# programing language within GNU/Emacs
import gpycomplete.main
import gpycomplete.complete
import gpycomplete.context
import gpycomplete.django
import gpycomplete.helpers
__all__ = ['main', 'complete', 'context', 'django', 'helpers']
VERSION = '0.3'
| gpl-3.0 | 3,645,051,106,348,817,400 | 39.655172 | 70 | 0.772689 | false |
Crowdcomputer/CroCoAPI | crocoapi/settings.py | 1 | 2668 | """
Django settings for CroCoAPI project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from settings_production import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
if DEBUG:
from settings_local import *
else:
from settings_production import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ui',
'api',
'rest_framework',
'rest_framework.authtoken',
'south',
'crispy_forms',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'crocoapi.urls'
WSGI_APPLICATION = 'crocoapi.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
# 'rest_framework.authentication.TokenAuthentication',
'api.authentication.TokenAppAuthentication',
),
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
# 'DEFAULT_MODEL_SERIALIZER_CLASS':
# 'rest_framework.serializers.HyperlinkedModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
'api.permissions.IsOwner'
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
CRISPY_TEMPLATE_PACK='bootstrap'
| gpl-2.0 | 2,834,945,857,428,308,500 | 25.415842 | 75 | 0.718891 | false |
jsxyhdy/myweather | weather/tests.py | 1 | 2035 | from django.test import TestCase
from weather.models import *
# Create your tests here.
class TestWeather(TestCase):
def setUp(self):
from weather.api_config import query_by_areaid
self.city = CityInfo(areaid='101010100',
nameen='beijing', namecn='北京',
districten='beijing', districtcn='北京',
proven='beijing', provcn='北京',
nationen='china', nationcn='中国')
self.city.save()
self.qstr = query_by_areaid(self.city.areaid)
def test_city_update_forecast(self):
from weather.models import update_forecast
import json
update_forecast(self.city, json.loads(self.qstr))
forecasts = Forecast.objects.filter(city=self.city)
for forecast in forecasts:
print(forecast.forecast_date, ',', forecast.cast_time, ',', forecast.last_update, ',',
forecast.weather_desc(),
forecast.sunraise, forecast.sunset, forecast.temperature_day, forecast.temperature_night)
def test_views(self):
from django.test import Client
c = Client()
resp = c.get('/')
self.assertEqual(resp.status_code, 200)
resp = c.get('/101010100')
self.assertEqual(resp.status_code, 200)
resp = c.get('/10101010')
self.assertEqual(resp.status_code, 404)
resp = c.get('/filtercity/北')
self.assertEqual(resp.status_code, 200)
def test_update_all_city(self):
from weather.models import update_all_city
print('update_all_city',CityInfo.objects.all())
update_all_city()
forecasts = Forecast.objects.filter(city=self.city)
for forecast in forecasts:
print(forecast.forecast_date, ',', forecast.cast_time, ',', forecast.last_update, ',',
forecast.weather_desc(),
forecast.sunraise, forecast.sunset, forecast.temperature_day, forecast.temperature_night)
| mit | 7,802,665,910,551,116,000 | 41.020833 | 107 | 0.601884 | false |
sbrodehl/HashCode | Final Round/best_solution_in_the_wuuuuuuurld.py | 1 | 19988 | from random import shuffle
from skimage.morphology import skeletonize, medial_axis
from tqdm import tqdm
from scipy import signal
import scipy.ndimage.filters as fi
import pickle
import glob
import bz2
import multiprocessing
from multiprocessing import Pool
from functools import partial
from IO import *
from Utilities import compute_solution_score, wireless_access, quasi_euclidean_dist, chessboard_dist
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
def place_routers_on_skeleton(d, cmethod):
wireless = np.where(d["graph"] == Cell.Wireless, 1, 0)
# perform skeletonization
skeleton = skeletonize(wireless)
med_axis = medial_axis(wireless)
skel = skeleton
# skel = med_axis
# get all skeleton positions
pos = []
for i in range(skel.shape[0]):
for j in range(skel.shape[1]):
if skel[i][j]:
pos.append((i, j))
budget = d['budget']
shuffle(pos)
max_num_routers = min([int(d['budget'] / d['price_router']), len(pos)])
print("Num of routers constrained by:")
print(" budget: %d" % int(int(d['budget'] / d['price_router'])))
print(" skeleton: %d" % len(pos))
for i in tqdm(range(max_num_routers), desc="Placing Routers"):
new_router = pos[i]
a, b = new_router
# check if remaining budget is enough
d["graph"][a][b] = Cell.Router
d, ret, cost = _add_cabel(d, new_router, budget)
budget -= cost
if not ret:
break
return d
def place_routers_on_skeleton_iterative(d, cmethod):
budget = d['budget']
R = d['radius']
max_num_routers = int(d['budget'] / d['price_router'])
coverage = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.bool)
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
while budget > 0:
# perform skeletonization
# skeleton = skeletonize(coverage)
skeleton = medial_axis(coverage)
# get all skeleton positions
pos = np.argwhere(skeleton > 0).tolist()
# escape if no positions left
if not len(pos):
break
# get a random position
shuffle(pos)
a, b = pos[0]
# place router
d["graph"][a][b] = Cell.Router
d, ret, cost = _add_cabel(d, (a, b), budget)
if not ret:
print("No budget available!")
break
budget -= cost
# refresh wireless map by removing new coverage
m = wireless_access(a, b, R, d['graph']).astype(np.bool)
coverage[(a - R):(a + R + 1), (b - R):(b + R + 1)] &= ~m
pbar.update()
pbar.close()
return d
def place_routers_randomized(d, cmethod):
max_num_routers = int(d['budget'] / d['price_router'])
wireless = np.where(d["graph"] == Cell.Wireless, 0, 1)
print("Num of routers constrained by:")
print(" budget: %d" % int(int(d['budget'] / d['price_router'])))
budget = d['budget']
R = d['radius']
if cmethod == 'mst':
        succ, cost, routers, idx, idy, dists = _mst(d, d['backbone'])
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
for i in pbar:
# generate random position for router
indices = np.argwhere(wireless == 0).tolist()
x, y = indices[np.random.randint(0, len(indices))]
if len(indices) == 0:
pbar.close()
print("No more suitable positions left!")
return d
# modify graph
if cmethod == 'bfs':
d["graph"][x][y] = Cell.Router
d, ret, cost = _add_cabel(d, (x, y), budget)
if ret:
budget -= cost
# refresh wireless map by removing new coverage
mask = wireless_access(x, y, R, d['graph'])
wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] |= mask.astype(np.bool)
else:
# no more budget left
pbar.close()
print("No budget available!")
return d
elif cmethod == 'mst':
tmp = d["graph"][x][y]
d["graph"][x][y] = Cell.Router
            succ, cost, routers, idx, idy, dists = _mst(d, (x, y), routers, idx, idy, dists)
if succ and i < 10:
mask = wireless_access(x, y, R, d['graph'])
wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] |= mask.astype(np.bool)
else:
# reverse last router
d["graph"][x][y] = tmp
d = _place_mst_paths(d, routers, idx, idy, dists)
pbar.close()
print("No budget available!")
return d
pbar.update(max_num_routers)
return d
def _parallel_helper(position, radius, graph, offset=(0, 0)):
a, b = position
ux_min, uy_min = offset
a, b = a + ux_min, b + uy_min
mask = wireless_access(a, b, radius, graph)
return a, b, np.sum(np.nan_to_num(mask)), mask
def _parallel_counting_helper(position, radius, graph, scoring, offset=(0, 0)):
a, b = position
ux_min, uy_min = offset
a, b = a + ux_min, b + uy_min
mask = wireless_access(a, b, radius, graph)
wx_min, wx_max = np.max([0, (a - radius)]), np.min([scoring.shape[0], (a + radius + 1)])
wy_min, wy_max = np.max([0, (b - radius)]), np.min([scoring.shape[1], (b + radius + 1)])
# get the submask which is valid
dx, lx = np.abs(wx_min - (a - radius)), wx_max - wx_min
dy, ly = np.abs(wy_min - (b - radius)), wy_max - wy_min
return a, b, np.sum(np.multiply(scoring[wx_min:wx_max, wy_min:wy_max], np.nan_to_num(mask[dx:dx + lx, dy:dy + ly])))
def place_routers_randomized_by_score(d, cmethod):
# some constants
max_num_routers = int(d['budget'] / d['price_router'])
budget = d['budget']
R = d['radius']
wireless = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.int8)
scoring = np.zeros(wireless.shape, dtype=np.float32) - 1
counting = np.zeros_like(scoring)
coverage = {}
print("Num of routers constrained by:")
print(" budget: %d" % max_num_routers)
fscore = d['name'] + ".scores"
fcov = d['name'] + ".coverage"
facc = d['name'] + ".counting"
compute_stuff = False
sample_files = glob.glob('output/' + facc)
if len(sample_files) and not compute_stuff:
print("Found accounting file.")
counting = pickle.load(bz2.BZ2File(sample_files[0], 'r'))
else:
compute_stuff = True
sample_files = glob.glob('output/' + fscore)
if len(sample_files) and not compute_stuff:
print("Found scoring file.")
scoring = pickle.load(bz2.BZ2File(sample_files[0], 'r'))
else:
compute_stuff = True
sample_files = glob.glob('output/' + fcov)
if len(sample_files) and not compute_stuff:
print("Found coverage file.")
coverage = pickle.load(bz2.BZ2File(sample_files[0], 'r'))
else:
compute_stuff = True
if compute_stuff:
# compute initial scoring, which will be updated during placing
positions = np.argwhere(wireless > 0).tolist()
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s, m in pool.imap_unordered(partial(_parallel_helper, radius=R, graph=d['original']), positions):
scoring[a][b] = s
coverage[(a, b)] = m
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s in pool.imap_unordered(
partial(_parallel_counting_helper, radius=R, graph=wireless, scoring=scoring), positions):
counting[a][b] = s
print("Saving scoring file.")
# save scoring to disk
pickle.dump(scoring, bz2.BZ2File('output/' + fscore, 'w'), pickle.HIGHEST_PROTOCOL)
print("Saving coverage file.")
# save coverage to disk
pickle.dump(coverage, bz2.BZ2File('output/' + fcov, 'w'), pickle.HIGHEST_PROTOCOL)
print("Saving counting file.")
# save coverage to disk
pickle.dump(counting, bz2.BZ2File('output/' + facc, 'w'), pickle.HIGHEST_PROTOCOL)
routers = []
idx, idy, dists = [], [], []
if cmethod == 'mst':
placed, cost, routers, idx, idy, dists = _mst(d, d['backbone'])
# choose routers by score and place them!
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
while budget > 0:
placement = None
max_score = scoring.max()
if max_score > 0:
possible_placements = np.argwhere(scoring == max_score).tolist()
score_count = {}
for pp in possible_placements:
score_count[(pp[0], pp[1])] = counting[pp[0]][pp[1]]
sorted_scores = sorted(score_count)
placement = next(iter(sorted_scores or []), None)
if placement is None:
print("No positions available!")
break
# update progress bar
pbar.update()
x, y = placement
cost = 0
placed = False
if cmethod == 'mst':
tmp = d["graph"][x][y]
d["graph"][x][y] = Cell.Router
placed, nbud, routers, idx, idy, dists = _mst(d, (x, y), routers, idx, idy, dists)
budget = d['budget'] - nbud
if not placed:
d["graph"][x][y] = tmp
routers = routers[:-1]
idx, idy, dists = idx[:-len(routers)], idy[:-len(routers)], dists[:-len(routers)]
else:
# bfs as default
# modify graph, add router and cables
d["graph"][x][y] = Cell.Router
d, placed, cost = _add_cabel(d, (x, y), budget)
        # check if the new path is not too expensive
if not placed:
print("No budget available!")
break
# update budget
budget -= cost
# prepare coverage and scoring for next round
# remove score for current router
wx_min, wx_max = np.max([0, (x - R)]), np.min([wireless.shape[0], (x + R + 1)])
wy_min, wy_max = np.max([0, (y - R)]), np.min([wireless.shape[1], (y + R + 1)])
# get the submask which is valid
dx, lx = np.abs(wx_min - (x - R)), wx_max - wx_min
dy, ly = np.abs(wy_min - (y - R)), wy_max - wy_min
# remove coverage from map
wireless[wx_min:wx_max, wy_min:wy_max] &= ~(coverage[(x, y)][dx:dx + lx, dy:dy + ly].astype(np.bool))
# nullify scores
scoring[wx_min:wx_max, wy_min:wy_max] = -1
ux_min, uy_min = np.max([0, (x - 2 * R)]), np.max([0, (y - 2 * R)])
ux_max, uy_max = np.min([wireless.shape[0], (x + 2 * R + 1)]), np.min([wireless.shape[1], (y + 2 * R + 1)])
# compute places to be updated
updating = wireless[ux_min:ux_max, uy_min:uy_max]
# get all position coordinates
positions = np.argwhere(updating).tolist()
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s, m in pool.imap_unordered(
partial(_parallel_helper, radius=R, graph=wireless, offset=(ux_min, uy_min)), positions):
scoring[a][b] = s
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s in pool.imap_unordered(
partial(_parallel_counting_helper, radius=R, graph=wireless, scoring=scoring,
offset=(ux_min, uy_min)), positions):
counting[a][b] = s
counting = np.multiply(counting, wireless)
# budget looks good, place all cables
if cmethod == 'mst':
d = _place_mst_paths(d, routers, idx, idy, dists)
pbar.close()
return d
def place_routers_by_convolution(d, cmethod):
max_num_routers = int(d['budget'] / d['price_router'])
# wireless = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.float64)
wireless = np.where(d["graph"] == Cell.Wireless, 1, -1).astype(np.float64)
walls = np.where(d['graph'] <= Cell.Wall, 0, 1).astype(np.float64)
print("Num of routers constrained by:")
print(" budget: %d" % int(int(d['budget'] / d['price_router'])))
budget = d['budget']
R = d['radius']
r21 = 2 * R + 1
stdev = 6.6
# kernel = np.ones((2*R+1, 2*R+1))
# kernel = (_gkern2(2 * R + 1, 2) * 1e2)
kernel = (np.outer(signal.gaussian(r21, stdev), signal.gaussian(r21, stdev))).astype(np.float32)
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
while budget > 0:
# convolve
mat = signal.fftconvolve(wireless, kernel, mode='same')
found = False
while not found:
# get the max of the conv matrix
mat_max = mat.max()
max_positions = np.argwhere(mat == mat_max).tolist()
selected_pos = max_positions[np.random.randint(0, len(max_positions))]
# check if we have suitable positions left
if mat_max == -np.inf:
pbar.close()
print("No more suitable positions left!")
return d
x, y = selected_pos
# max can be on a wall position... ignore it
if d['graph'][x][y] <= Cell.Wall:
# pbar.write('> Optimal position on wall cell...')
mat[x][y] = -np.inf
else:
found = True
# update progress bar
pbar.update()
# modify graph
d["graph"][x][y] = Cell.Router
d, ret, cost = _add_cabel(d, (x, y), budget)
        # check if the new path is not too expensive
if ret:
budget -= cost
# refresh wireless map by removing new coverage
mask = wireless_access(x, y, R, d['graph'])
# wireless[(a - R):(a + R + 1), (b - R):(b + R + 1)] &= ~mask.astype(np.bool)
# wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] -= kernel
wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] = -1.0
else:
# we've not enough budget
pbar.close()
print("No budget available!")
return d
pbar.close()
return d
def _mst(d, new_router, routers=[], idx=[], idy=[], dists=[]):
new_id = len(routers)
# calc new router dists
for i, a in enumerate(routers):
dist = chessboard_dist(a, new_router)
if dist > 0:
idx.append(i)
idy.append(new_id)
dists.append(dist)
# add new router
routers.append(new_router)
# create matrix
mat = csr_matrix((dists, (idx, idy)), shape=(len(routers), len(routers)))
# minimal spanning tree
Tmat = minimum_spanning_tree(mat)
# check costs
cost = np.sum(Tmat) * d['price_backbone'] + (len(routers) - 1) * d['price_router']
succ = cost <= d['original_budget']
# return
return succ, cost, routers, idx, idy, dists
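# Minimal sketch of the SciPy calls used by _mst() above (added for clarity; the
# three-node graph is a made-up example, not project data). The helper is defined
# but never called.
def _mst_demo():
    # three routers with pairwise chessboard distances 2, 3 and 4
    mat = csr_matrix(([2, 3, 4], ([0, 0, 1], [1, 2, 2])), shape=(3, 3))
    tree = minimum_spanning_tree(mat)  # keeps the two cheapest edges (2 and 3)
    return np.sum(tree)  # total length of the spanning tree == 5.0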
def find_chess_connection(a, b):
cables = []
dx, dy = np.abs(a[0] - b[0]) + 1, np.abs(a[1] - b[1]) + 1
xmin, ymin = np.min([a[0], b[0]]), np.min([a[1], b[1]])
path = np.zeros((dx, dy), dtype=np.bool)
path[a[0] - xmin][a[1] - ymin] = True
path[b[0] - xmin][b[1] - ymin] = True
r = [dx, dy]
amin = np.argmin(r)
flipped = False
if not path[0][0]:
path = np.flipud(path)
flipped = True
# set diagonal elements
for i in range(r[amin]):
path[i][i] = True
# set remaining straight elements
if amin == 0:
for i in range(np.abs(dx - dy)):
path[-1][r[amin] + i] = True
elif amin == 1:
for i in range(np.abs(dx - dy)):
path[r[amin] + i][-1] = True
if flipped:
path = np.flipud(path)
# select cables
for i, row in enumerate(path):
for j, col in enumerate(row):
if path[i][j]:
cables.append((i + xmin, j + ymin))
return cables
def find_connection(router_from, router_to):
cables = []
if router_from[0] < router_to[0]:
xr = range(router_from[0], router_to[0] + 1)
else:
xr = range(router_from[0], router_to[0] - 1, -1)
if router_from[1] < router_to[1]:
yr = range(router_from[1], router_to[1] + 1)
else:
yr = range(router_from[1], router_to[1] - 1, -1)
for x1 in xr:
cables.append((x1, router_from[1]))
for y1 in yr:
cables.append((router_to[0], y1))
return cables
def _place_mst_paths(d, routers, idx, idy, dists):
# calc mst
mat = csr_matrix((dists, (idx, idy)), shape=(len(routers), len(routers)))
Tmat = minimum_spanning_tree(mat).toarray()
# place cabels
for i, r in enumerate(Tmat):
for j, c in enumerate(r):
if Tmat[i, j] > 0:
cables = find_chess_connection(routers[i], routers[j])
for cable in cables:
if cable == d['backbone']:
continue
if d['graph'][cable] == Cell.Router:
d['graph'][cable] = Cell.ConnectedRouter
else:
d['graph'][cable] = Cell.Cable
for router in routers:
if router == d['backbone']:
continue
d['graph'][router] = Cell.ConnectedRouter
return d
def _add_cabel(d, new_router, remaining_budget):
path = _bfs(d, new_router)
cost = len(path) * d['price_backbone'] + d['price_router']
if cost <= remaining_budget:
for c in path:
if d['graph'][c] == Cell.Router:
d['graph'][c] = Cell.ConnectedRouter
else:
d['graph'][c] = Cell.Cable
return d, True, cost
return d, False, cost
def _bfs(d, start):
dx = [0, -1, 1]
dy = [0, -1, 1]
visited = np.zeros((d['height'], d['width']), dtype=np.bool)
parent = (np.zeros((d['height'], d['width']), dtype=np.int32) - 1).tolist()
queue = deque()
queue.append(start)
visited[start[0]][start[1]] = True
while queue:
cur = queue.popleft()
# check goal condition
if d['graph'][cur] >= Cell.ConnectedRouter or cur == d['backbone']:
# generate path from parent array
path = []
a = cur
while a != start:
path.append(a)
a = parent[a[0]][a[1]]
path.append(a)
return path[1:]
# add children
# check neighbors
for ddx in dx:
for ddy in dy:
if ddx == 0 and ddy == 0:
continue
child_x, child_y = cur[0] + ddx, cur[1] + ddy
# only if still in the grid
if 0 <= child_x < d['height'] and 0 <= child_y < d['width']:
child = (child_x, child_y)
# everything is "walkable" cells
if not visited[child[0]][child[1]]:
queue.append(child)
visited[child[0]][child[1]] = True
parent[child[0]][child[1]] = cur
return None
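# Note (added): since the neighbourhood above includes diagonals and every cell is
# treated as walkable, the BFS expands in chessboard-distance order, so the returned
# cable path is a shortest 8-connected path to the nearest connected
# router/cable/backbone cell.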
def _gkern2(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array."""
# create nxn zeros
inp = np.zeros((kernlen, kernlen))
# set element at the middle to one, a dirac delta
inp[kernlen // 2, kernlen // 2] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter mask
return fi.gaussian_filter(inp, nsig)
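# Equivalent construction (sketch, added for clarity): the same kernel shape can be
# built with the separable form already used in place_routers_by_convolution(),
#   np.outer(signal.gaussian(kernlen, nsig), signal.gaussian(kernlen, nsig))
# up to normalisation -- gaussian_filter() yields a kernel that sums to 1, the
# outer-product version does not.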
if __name__ == '__main__':
D = read_dataset('input/example.in')
budget = D['budget']
routers = [(3, 6), (3, 9)]
for r in routers:
# set routers
D['graph'][r[0], r[1]] = Cell.Router
D, placed, cost = _add_cabel(D, r, budget)
if not placed:
print("No budget available!")
break
budget -= cost
score = compute_solution_score(D)
print(score)
write_solution('output/example.out', D)
| apache-2.0 | 1,336,552,385,937,426,200 | 32.092715 | 120 | 0.537422 | false |
dcondrey/scrapy-spiders | newenglandfilm/newengland/spiders/newenglandfilm.py | 1 | 1830 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.crawler import Crawler
from scrapy.http import Request
from scrapy import signals
from scrapy.utils.project import get_project_settings
from my_settings import name_file, test_mode, difference_days
from twisted.internet import reactor
from datetime import datetime, timedelta
import re
print "Run spider NewenglandFilm"
file_output = open(name_file, 'a')
email_current_session = []
email_in_file = open(name_file, 'r').readlines()
if test_mode:
current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y')
else:
current_date = datetime.today().strftime('%m/%d/%Y')
class NewenglandFilm(Spider):
name = 'newenglandfilm'
allowed_domains = ["newenglandfilm.com"]
start_urls = ["http://newenglandfilm.com/jobs.htm"]
def parse(self, response):
sel = Selector(response)
for num_div in xrange(1, 31):
date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0]
email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})')
if current_date == date:
for address in email:
if address + "\n" not in email_in_file and address not in email_current_session:
file_output.write(address + "\n")
email_current_session.append(address)
print "Spider: NewenglandFilm. Email {0} added to file".format(address)
else:
print "Spider: NewenglandFilm. Email {0} already in the file".format(address) | mit | 808,298,491,493,015,200 | 37.826087 | 138 | 0.60929 | false |
astrobin/astrobin | astrobin_apps_payments/api/views/pricing_view.py | 1 | 1570 | import logging
from braces.views import JSONResponseMixin
from django.conf import settings
from django.http import HttpResponseBadRequest
from django.views import View
from rest_framework.authtoken.models import Token
from astrobin_apps_payments.services.pricing_service import PricingService
log = logging.getLogger('apps')
class PricingView(JSONResponseMixin, View):
def get(self, request, *args, **kwargs):
product = kwargs.pop('product', None) # type: str
currency = kwargs.pop('currency', None) # type: str
if product is None or product.lower() not in ('lite', 'premium', 'ultimate'):
log.error('pricing_view: invalid product: %s' % product)
return HttpResponseBadRequest("Invalid product")
if currency is None or currency.upper() not in settings.SUPPORTED_CURRENCIES:
log.error('pricing_view: invalid currency: %s' % currency)
return HttpResponseBadRequest("Unsupported currency")
user = None
if 'HTTP_AUTHORIZATION' in request.META:
token_in_header = request.META['HTTP_AUTHORIZATION'].replace('Token ', '')
token = Token.objects.get(key=token_in_header)
user = token.user
return self.render_json_response({
'fullPrice': PricingService.get_full_price(product.lower(), currency.upper()),
'discount': PricingService.get_discount_amount(product.lower(), currency.upper(), user=user),
'price': PricingService.get_price(product.lower(), currency.upper(), user=user)
})
| agpl-3.0 | -8,189,497,786,092,724,000 | 41.432432 | 105 | 0.67707 | false |
amitjamadagni/sympy | sympy/matrices/tests/test_sparse.py | 2 | 16649 | from sympy import S, Symbol, I, Rational, PurePoly
from sympy.matrices import Matrix, SparseMatrix, eye, zeros, ShapeError
from sympy.utilities.pytest import raises, XFAIL
def test_sparse_matrix():
def sparse_eye(n):
return SparseMatrix.eye(n)
def sparse_zeros(n):
return SparseMatrix.zeros(n)
# creation args
raises(TypeError, lambda: SparseMatrix(1, 2))
a = SparseMatrix((
(1, 0),
(0, 1)
))
assert SparseMatrix(a) == a
# test element assignment
a = SparseMatrix((
(1, 0),
(0, 1)
))
a[3] = 4
assert a[1, 1] == 4
a[3] = 1
a[0, 0] = 2
assert a == SparseMatrix((
(2, 0),
(0, 1)
))
a[1, 0] = 5
assert a == SparseMatrix((
(2, 0),
(5, 1)
))
a[1, 1] = 0
assert a == SparseMatrix((
(2, 0),
(5, 0)
))
assert a._smat == {(0, 0): 2, (1, 0): 5}
# test_multiplication
a = SparseMatrix((
(1, 2),
(3, 1),
(0, 6),
))
b = SparseMatrix((
(1, 2),
(3, 0),
))
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
x = Symbol("x")
c = b * Symbol("x")
assert isinstance(c, SparseMatrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c = 5 * b
assert isinstance(c, SparseMatrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
#test_power
A = SparseMatrix([[2, 3], [4, 5]])
assert (A**5)[:] == [6140, 8097, 10796, 14237]
A = SparseMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]
# test_creation
x = Symbol("x")
a = SparseMatrix([[x, 0], [0, 0]])
m = a
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
b = SparseMatrix(2, 2, [x, 0, 0, 0])
m = b
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
assert a == b
S = sparse_eye(3)
S.row_del(1)
assert S == SparseMatrix([
[1, 0, 0],
[0, 0, 1]])
S = sparse_eye(3)
S.col_del(1)
assert S == SparseMatrix([
[1, 0],
[0, 0],
[0, 1]])
S = SparseMatrix.eye(3)
S[2, 1] = 2
S.col_swap(1, 0)
assert S == SparseMatrix([
[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
a = SparseMatrix(1, 2, [1, 2])
b = a.copy()
c = a.copy()
assert a[0] == 1
a.row_del(0)
assert a == SparseMatrix(0, 2, [])
b.col_del(1)
assert b == SparseMatrix(1, 1, [1])
# test_determinant
x, y = Symbol('x'), Symbol('y')
assert SparseMatrix(1, 1, [0]).det() == 0
assert SparseMatrix([[1]]).det() == 1
assert SparseMatrix(((-3, 2), (8, -5))).det() == -1
assert SparseMatrix(((x, 1), (y, 2*y))).det() == 2*x*y - y
assert SparseMatrix(( (1, 1, 1),
(1, 2, 3),
(1, 3, 6) )).det() == 1
assert SparseMatrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) )).det() == -289
assert SparseMatrix(( ( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16) )).det() == 0
assert SparseMatrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) )).det() == 275
assert SparseMatrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) )).det() == -55
assert SparseMatrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) )).det() == 11664
assert SparseMatrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) )).det() == 123
# test_submatrix
m0 = sparse_eye(4)
assert m0[:3, :3] == sparse_eye(3)
assert m0[2:4, 0:2] == sparse_zeros(2)
m1 = SparseMatrix(3, 3, lambda i, j: i + j)
assert m1[0, :] == SparseMatrix(1, 3, (0, 1, 2))
assert m1[1:3, 1] == SparseMatrix(2, 1, (2, 3))
m2 = SparseMatrix(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
assert m2[:, -1] == SparseMatrix(4, 1, [3, 7, 11, 15])
assert m2[-2:, :] == SparseMatrix([[8, 9, 10, 11], [12, 13, 14, 15]])
assert SparseMatrix([[1, 2], [3, 4]]).submatrix([1, 1]) == Matrix([[4]])
# test_submatrix_assignment
m = sparse_zeros(4)
m[2:4, 2:4] = sparse_eye(2)
assert m == SparseMatrix([(0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1)])
assert len(m._smat) == 2
m[:2, :2] = sparse_eye(2)
assert m == sparse_eye(4)
m[:, 0] = SparseMatrix(4, 1, (1, 2, 3, 4))
assert m == SparseMatrix([(1, 0, 0, 0),
(2, 1, 0, 0),
(3, 0, 1, 0),
(4, 0, 0, 1)])
m[:, :] = sparse_zeros(4)
assert m == sparse_zeros(4)
m[:, :] = ((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16))
assert m == SparseMatrix((( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
m[:2, 0] = [0, 0]
assert m == SparseMatrix((( 0, 2, 3, 4),
( 0, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
# test_reshape
m0 = sparse_eye(3)
assert m0.reshape(1, 9) == SparseMatrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = SparseMatrix(3, 4, lambda i, j: i + j)
assert m1.reshape(4, 3) == \
SparseMatrix([(0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)])
assert m1.reshape(2, 6) == \
SparseMatrix([(0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)])
# test_applyfunc
m0 = sparse_eye(3)
assert m0.applyfunc(lambda x: 2*x) == sparse_eye(3)*2
assert m0.applyfunc(lambda x: 0 ) == sparse_zeros(3)
# test_LUdecomp
testmat = SparseMatrix([[ 0, 2, 5, 3],
[ 3, 3, 7, 4],
[ 8, 4, 0, 2],
[-2, 6, 3, 4]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
testmat = SparseMatrix([[ 6, -2, 7, 4],
[ 0, 3, 6, 7],
[ 1, -2, 7, 4],
[-9, 2, 6, 3]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
x, y, z = Symbol('x'), Symbol('y'), Symbol('z')
M = Matrix(((1, x, 1), (2, y, 0), (y, 0, z)))
L, U, p = M.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - M == sparse_zeros(3)
# test_LUsolve
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
x = SparseMatrix(3, 1, [3, 7, 5])
b = A*x
soln = A.LUsolve(b)
assert soln == x
A = SparseMatrix([[0, -1, 2],
[5, 10, 7],
[8, 3, 4]])
x = SparseMatrix(3, 1, [-1, 2, 5])
b = A*x
soln = A.LUsolve(b)
assert soln == x
# test_inverse
A = sparse_eye(4)
assert A.inv() == sparse_eye(4)
assert A.inv(method="CH") == sparse_eye(4)
assert A.inv(method="LDL") == sparse_eye(4)
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[7, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method="CH") == Ainv
assert A.inv(method="LDL") == Ainv
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[5, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method="CH") == Ainv
assert A.inv(method="LDL") == Ainv
# test_cross
v1 = Matrix(1, 3, [1, 2, 3])
v2 = Matrix(1, 3, [3, 4, 5])
assert v1.cross(v2) == Matrix(1, 3, [-2, 4, -2])
assert v1.norm(2)**2 == 14
# conjugate
a = SparseMatrix(((1, 2 + I), (3, 4)))
assert a.C == SparseMatrix([
[1, 2 - I],
[3, 4]
])
# mul
assert a*Matrix(2, 2, [1, 0, 0, 1]) == a
assert a + Matrix(2, 2, [1, 1, 1, 1]) == SparseMatrix([
[2, 3 + I],
[4, 5]
])
# col join
assert a.col_join(sparse_eye(2)) == SparseMatrix([
[1, 2 + I],
[3, 4],
[1, 0],
[0, 1]
])
# symmetric
assert not a.is_symmetric(simplify=False)
# test_cofactor
assert sparse_eye(3) == sparse_eye(3).cofactorMatrix()
test = SparseMatrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
assert test.cofactorMatrix() == \
SparseMatrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
test = SparseMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert test.cofactorMatrix() == \
SparseMatrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
# test_jacobian
x = Symbol('x')
y = Symbol('y')
L = SparseMatrix(1, 2, [x**2*y, 2*y**2 + x*y])
syms = [x, y]
assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])
L = SparseMatrix(1, 2, [x, x**2*y**3])
assert L.jacobian(syms) == SparseMatrix([[1, 0], [2*x*y**3, x**2*3*y**2]])
# test_QR
A = Matrix([[1, 2], [2, 3]])
Q, S = A.QRdecomposition()
R = Rational
assert Q == Matrix([
[ 5**R(-1, 2), (R(2)/5)*(R(1)/5)**R(-1, 2)],
[2*5**R(-1, 2), (-R(1)/5)*(R(1)/5)**R(-1, 2)]])
assert S == Matrix([
[5**R(1, 2), 8*5**R(-1, 2)],
[ 0, (R(1)/5)**R(1, 2)]])
assert Q*S == A
assert Q.T * Q == sparse_eye(2)
R = Rational
# test nullspace
# first test reduced row-echelon form
M = SparseMatrix([[5, 7, 2, 1],
[1, 6, 2, -1]])
out, tmp = M.rref()
assert out == Matrix([[1, 0, -R(2)/23, R(13)/23],
[0, 1, R(8)/23, R(-6)/23]])
M = SparseMatrix([[ 1, 3, 0, 2, 6, 3, 1],
[-2, -6, 0, -2, -8, 3, 1],
[ 3, 9, 0, 0, 6, 6, 2],
[-1, -3, 0, 1, 0, 9, 3]])
out, tmp = M.rref()
assert out == Matrix([[1, 3, 0, 0, 2, 0, 0],
[0, 0, 0, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 1, R(1)/3],
[0, 0, 0, 0, 0, 0, 0]])
# now check the vectors
basis = M.nullspace()
assert basis[0] == Matrix([-3, 1, 0, 0, 0, 0, 0])
assert basis[1] == Matrix([0, 0, 1, 0, 0, 0, 0])
assert basis[2] == Matrix([-2, 0, 0, -2, 1, 0, 0])
assert basis[3] == Matrix([0, 0, 0, 0, 0, R(-1)/3, 1])
# test eigen
x = Symbol('x')
y = Symbol('y')
sparse_eye3 = sparse_eye(3)
assert sparse_eye3.charpoly(x) == PurePoly(((x - 1)**3))
assert sparse_eye3.charpoly(y) == PurePoly(((y - 1)**3))
# test values
M = Matrix([( 0, 1, -1),
( 1, 1, 0),
(-1, 0, 1)])
vals = M.eigenvals()
assert sorted(vals.keys()) == [-1, 1, 2]
R = Rational
M = Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 3, [
Matrix([1, 0, 0]),
Matrix([0, 1, 0]),
Matrix([0, 0, 1])])]
M = Matrix([[5, 0, 2],
[3, 2, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 1, [Matrix([R(-1)/2, R(3)/2, 1])]),
(2, 1, [Matrix([0, 1, 0])]),
(5, 1, [Matrix([1, 1, 0])])]
assert M.zeros(3, 5) == SparseMatrix(3, 5, {})
def test_transpose():
assert SparseMatrix(((1, 2), (3, 4))).transpose() == \
SparseMatrix(((1, 3), (2, 4)))
def test_trace():
assert SparseMatrix(((1, 2), (3, 4))).trace() == 5
assert SparseMatrix(((0, 0), (0, 4))).trace() == 4
def test_CL_RL():
assert SparseMatrix(((1, 2), (3, 4))).row_list() == \
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
assert SparseMatrix(((1, 2), (3, 4))).col_list() == \
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
def test_add():
assert SparseMatrix(((1, 0), (0, 1))) + SparseMatrix(((0, 1), (1, 0))) == \
SparseMatrix(((1, 1), (1, 1)))
a = SparseMatrix(100, 100, lambda i, j: int(j != 0 and i % j == 0))
b = SparseMatrix(100, 100, lambda i, j: int(i != 0 and j % i == 0))
assert (len(a._smat) + len(b._smat) - len((a + b)._smat) > 0)
def test_errors():
raises(ValueError, lambda: SparseMatrix(1.4, 2, lambda i, j: 0))
raises(TypeError, lambda: SparseMatrix([1, 2, 3], [1, 2]))
raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[(1, 2, 3)])
raises(IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[5])
raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2, 3])
raises(TypeError,
lambda: SparseMatrix([[1, 2], [3, 4]]).copyin_list([0, 1], set([])))
raises(
IndexError, lambda: SparseMatrix([[1, 2], [3, 4]]).submatrix((1, 2)))
raises(TypeError, lambda: SparseMatrix([1, 2, 3]).cross(1))
raises(IndexError, lambda: SparseMatrix(1, 2, [1, 2])[3])
raises(ShapeError,
lambda: SparseMatrix(1, 2, [1, 2]) + SparseMatrix(2, 1, [2, 1]))
def test_len():
assert not SparseMatrix()
assert SparseMatrix() == SparseMatrix([])
@XFAIL
def test_len_different_shapes():
assert Matrix() == Matrix([[]])
assert SparseMatrix() == SparseMatrix([[]])
def test_sparse_zeros_sparse_eye():
assert SparseMatrix.eye(3) == eye(3, cls=SparseMatrix)
assert len(SparseMatrix.eye(3)._smat) == 3
assert SparseMatrix.zeros(3) == zeros(3, cls=SparseMatrix)
assert len(SparseMatrix.zeros(3)._smat) == 0
def test_copyin():
s = SparseMatrix(3, 3, {})
s[1, 0] = 1
assert s[:, 0] == SparseMatrix(Matrix([0, 1, 0]))
assert s[3] == 1
assert s[3: 4] == [1]
s[1, 1] = 42
assert s[1, 1] == 42
assert s[1, 1:] == SparseMatrix([[42, 0]])
s[1, 1:] = Matrix([[5, 6]])
assert s[1, :] == SparseMatrix([[1, 5, 6]])
s[1, 1:] = [[42, 43]]
assert s[1, :] == SparseMatrix([[1, 42, 43]])
s[0, 0] = 17
assert s[:, :1] == SparseMatrix([17, 1, 0])
s[0, 0] = [1, 1, 1]
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = Matrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = SparseMatrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
def test_sparse_solve():
from sympy.matrices import SparseMatrix
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
assert A.cholesky() == Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
assert A.cholesky() * A.cholesky().T == Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
L, D = A.LDLdecomposition()
assert 15*L == Matrix([
[15, 0, 0],
[ 9, 15, 0],
[-3, 5, 15]])
assert D == Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
assert L * D * L.T == A
A = SparseMatrix(((3, 0, 2), (0, 0, 1), (1, 2, 0)))
assert A.inv() * A == SparseMatrix(eye(3))
A = SparseMatrix([
[ 2, -1, 0],
[-1, 2, -1],
[ 0, 0, 2]])
ans = SparseMatrix([
[S(2)/3, S(1)/3, S(1)/6],
[S(1)/3, S(2)/3, S(1)/3],
[ 0, 0, S(1)/2]])
assert A.inv(method='CH') == ans
assert A.inv(method='LDL') == ans
assert A * ans == SparseMatrix(eye(3))
s = A.solve(A[:, 0], 'LDL')
assert A*s == A[:, 0]
s = A.solve(A[:, 0], 'CH')
assert A*s == A[:, 0]
A = A.col_join(A)
s = A.solve_least_squares(A[:, 0], 'CH')
assert A*s == A[:, 0]
s = A.solve_least_squares(A[:, 0], 'LDL')
assert A*s == A[:, 0]
| bsd-3-clause | -4,237,973,322,416,478,000 | 28.998198 | 79 | 0.417923 | false |
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam | unittests/RunDictionary/test_SolutionFile.py | 1 | 2498 | import unittest
from PyFoam.RunDictionary.SolutionFile import SolutionFile
from os import path,environ,remove,system
from tempfile import mktemp
from shutil import copyfile
from .test_TimeDirectory import damBreakTutorial,gammaName
from PyFoam.FoamInformation import foamVersionNumber,foamFork
theSuite=unittest.TestSuite()
class SolutionFileTest(unittest.TestCase):
def setUp(self):
self.theFile=mktemp()
if foamVersionNumber()<(2,0):
extension=""
elif foamFork() in ["openfoam","openfoamplus"] and foamVersionNumber()>=(4,):
extension=".orig"
else:
extension=".org"
copyfile(path.join(damBreakTutorial(),"0",gammaName()+extension),self.theFile)
def tearDown(self):
remove(self.theFile)
def testSolutionFileReadWrite(self):
test=SolutionFile(path.dirname(self.theFile),path.basename(self.theFile))
self.assertEqual(test.readInternalUniform(),"0")
self.assertEqual(test.readBoundary("atmosphere"),"0")
self.assertEqual(test.readDimension(),"0 0 0 0 0 0 0")
test.replaceBoundary("atmosphere",2.3)
self.assertEqual(test.readBoundary("atmosphere"),"2.3")
test.replaceInternal(3.14)
self.assertEqual(test.readInternalUniform(),"3.14")
theSuite.addTest(unittest.makeSuite(SolutionFileTest,"test"))
class SolutionFileTestZipped(unittest.TestCase):
def setUp(self):
self.theFile=mktemp()
if foamVersionNumber()<(2,0):
extension=""
elif foamFork() in ["openfoam","openfoamplus"] and foamVersionNumber()>=(4,):
extension=".orig"
else:
extension=".org"
copyfile(path.join(damBreakTutorial(),"0",gammaName()+extension),self.theFile)
system("gzip -f "+self.theFile)
def tearDown(self):
remove(self.theFile+".gz")
def testSolutionFileZippedReadWrite(self):
test=SolutionFile(path.dirname(self.theFile),path.basename(self.theFile))
self.assertEqual(test.readInternalUniform(),"0")
self.assertEqual(test.readBoundary("atmosphere"),"0")
self.assertEqual(test.readDimension(),"0 0 0 0 0 0 0")
test.replaceBoundary("atmosphere",2.3)
self.assertEqual(test.readBoundary("atmosphere"),"2.3")
test.replaceInternal(3.14)
self.assertEqual(test.readInternalUniform(),"3.14")
theSuite.addTest(unittest.makeSuite(SolutionFileTestZipped,"test"))
# Should work with Python3 and Python2
| gpl-2.0 | 5,917,689,892,909,069,000 | 36.283582 | 86 | 0.683747 | false |
TshepangRas/tshilo-dikotla | td_maternal/forms/antenatal_enrollment_form.py | 1 | 2783 | from dateutil.relativedelta import relativedelta
from django import forms
from edc_constants.constants import YES
from td_maternal.models.enrollment_helper import EnrollmentHelper
from ..models import AntenatalEnrollment, MaternalEligibility
from .base_enrollment_form import BaseEnrollmentForm
class AntenatalEnrollmentForm(BaseEnrollmentForm):
def clean(self):
cleaned_data = super(AntenatalEnrollmentForm, self).clean()
# registered_subject = cleaned_data.get('registered_subject')
# if not registered_subject:
# raise forms.ValidationError('Expected a registered subject. Got None.')
# if not self.instance.id:
# registered_subject = cleaned_data.get('registered_subject')
# try:
# PostnatalEnrollment.objects.get(registered_subject=registered_subject)
# raise forms.ValidationError(
# "Antenatal enrollment is NOT REQUIRED. Postnatal Enrollment already completed")
# except PostnatalEnrollment.DoesNotExist:
# pass
# self.fill_postnatal_enrollment_if_recently_delivered()
# self.raise_if_rapid_test_required()
self.validate_last_period_date(cleaned_data.get('report_datetime'), cleaned_data.get('last_period_date'))
enrollment_helper = EnrollmentHelper(instance_antenatal=self._meta.model(**cleaned_data),
exception_cls=forms.ValidationError)
enrollment_helper.raise_validation_error_for_rapidtest()
return cleaned_data
def validate_last_period_date(self, report_datetime, last_period_date):
if last_period_date and (last_period_date >= report_datetime.date() - relativedelta(weeks=4)):
raise forms.ValidationError('LMP cannot be within 4 weeks of report datetime. '
'Got LMP as {} and report datetime as {}'.format(last_period_date,
report_datetime))
def clean_rapid_test_date(self):
rapid_test_date = self.cleaned_data['rapid_test_date']
registered_subject = self.cleaned_data['registered_subject']
if rapid_test_date:
try:
initial = AntenatalEnrollment.objects.get(
registered_subject=registered_subject)
if initial:
if rapid_test_date != initial.rapid_test_date:
raise forms.ValidationError('The rapid test result cannot be changed')
except AntenatalEnrollment.DoesNotExist:
pass
return rapid_test_date
class Meta:
model = AntenatalEnrollment
fields = '__all__'
| gpl-2.0 | -5,039,070,942,179,587,000 | 46.169492 | 113 | 0.625943 | false |
damoxc/vsmtpd | vsmtpd/daemon.py | 1 | 7764 | #
# vsmtpd/daemon.py
#
# Copyright (C) 2011 Damien Churchill <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
import os
import sys
import gevent
import signal
import logging
import vsmtpd.logging_setup
from gevent import socket
from gevent.pool import Pool
from gevent.server import StreamServer
from optparse import OptionParser
from vsmtpd.config import load_config
from vsmtpd.config import ConfigWrapper
from vsmtpd.connection import Connection
from vsmtpd.hooks import HookManager
from vsmtpd.plugins.manager import PluginManager
from vsmtpd.util import set_cmdline
log = logging.getLogger(__name__)
vsmtpd = None
class Vsmtpd(object):
def __init__(self, options, args):
self.options = options
self.args = args
self.pool = None
self.workers = []
# Load the configuration for the server
self.load_config()
# If we positive connection limit create a Pool with that limit
connection_limit = self.config.getint('connection_limit')
if connection_limit > 0:
self.pool = Pool(connection_limit)
log.info('Limiting connections to %d', connection_limit)
# Create the hook manager
self.hook_manager = HookManager()
# Create the plugin manager
plugin_path = self.config.get('plugin_path').split(':')
self.plugin_manager = PluginManager(plugin_path)
def fire(self, hook_name, *args, **kwargs):
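# Dispatch the named hook to every plugin registered with the hook manager.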
return self.hook_manager.dispatch_hook(hook_name, *args, **kwargs)
def handle(self, socket, address):
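# Serve a single incoming SMTP connection, running the pre/post connection hooks around it.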
connection = Connection(self, socket, address)
connection.run_hooks('pre_connection', connection)
connection.accept()
connection.run_hooks('post_connection', connection)
def load_config(self):
self._config = load_config(self.options.config or 'vsmtpd.cfg', {
'vsmtpd': {
'port': 25,
'interface': None,
'backlog': 50,
'workers': 0,
'size_limit': 0,
'helo_host': None,
'connection_limit': 100,
'spool_dir': '/var/spool/vsmtpd',
'keyfile': None,
'certfile': None,
'cert_reqs': None,
# FIXME: Provide a default secure (SSLV3/TLSV1) cipher setup
'ssl_version': None,
'ca_certs': None,
'suppress_ragged_eofs': None,
'do_handshake_on_connect': None,
# FIXME: Provide a default secure (SSLV3/TLSV1) cipher setup
'ciphers': None,
'plugin_path': '/usr/share/vsmtpd/plugins'
}
})
self.config = ConfigWrapper(self._config, 'vsmtpd')
def load_plugins(self):
log.info('Loading plugins...')
# Load the plugins
for section in self._config.sections():
if not section.startswith('plugin:'):
continue
plugin_name = section.split(':', 1)[1]
try:
plugin_cls = self.plugin_manager.load(plugin_name)
except Exception as e:
log.fatal("Failed to load plugin '%s'", plugin_name)
log.exception(e)
exit(1)
try:
if self._config.options(section):
plugin = plugin_cls(ConfigWrapper(self._config, section))
else:
plugin = plugin_cls()
plugin.plugin_name = plugin_name
except Exception as e:
log.fatal("Failed to initialise plugin '%s'", plugin_name)
log.exception(e)
exit(1)
self.hook_manager.register_object(plugin)
def reload(self, *args):
"""
Reload the configuration.
"""
def start(self):
"""
Starts the vsmtpd server in either master or worker mode.
"""
# Install the signal handlers
signal.signal(signal.SIGTERM, self.stop)
signal.signal(signal.SIGHUP, self.reload)
signal.signal(signal.SIGINT, self.stop)
workers = self.config.getint('workers')
backlog = self.config.getint('backlog')
addr = ('0.0.0.0', 2500)
if backlog < 1:
backlog = 50
log.info('Starting server on %s port %d', *addr)
if workers <= 0:
set_cmdline('vsmtpd: master')
self._start(addr, backlog)
# Open the socket for master/worker operation.
self.sock = socket.socket()
self.sock.bind(addr)
self.sock.listen(backlog)
self.sock.setblocking(0)
# Spawn the worker servers
for i in xrange(0, workers):
self._start_slave()
# Set the process title
set_cmdline('vsmtpd: master')
# Wait for the children to die
try:
os.waitpid(-1, 0)
except OSError:
pass
def _start(self, listener, backlog=None):
"""
Starts the vsmtpd server.
"""
self.server = StreamServer(listener, self.handle, backlog=backlog,
spawn=self.pool)
self.server.serve_forever()
def _start_slave(self):
"""
Starts a new slave worker process.
"""
pid = os.fork()
if pid == 0:
# Set up the command line and logger id
set_cmdline('vsmtpd: worker')
log.connection_id = 'worker'
# Call event_reinit()
gevent.reinit()
# Start vsmtpd
self._start(self.sock)
else:
log.info('Worker spawned PID %d', pid)
self.workers.append(pid)
def stop(self, *args):
"""
Shuts down the vsmtpd server and any workers running.
"""
# Shut down the server or the socket, depending on which is running
if self.workers:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
for pid in self.workers:
os.kill(pid, signal.SIGTERM)
else:
self.server.stop()
# Finally exit successfully
sys.exit()
def main():
global vsmtpd
parser = OptionParser()
parser.add_option('-c', '--config', dest='config', action='store',
default=None, help='the configuration file to use')
parser.add_option('-l', '--listen', dest='listen', action='append',
help='listen on this address')
parser.add_option('-p', '--port', dest='port', type='int', default=None,
help='set the default port to listen on')
(options, args) = parser.parse_args()
# Configure logging
logging.basicConfig(
level=logging.DEBUG,
format = '%(asctime)s %(levelname)-6s [%(name)-20s:%(lineno)-3s] [%(conn_id)-7s] %(message)s',
datefmt = '%a %d %b %Y %H:%M:%S'
)
log.connection_id = 'master'
vsmtpd = Vsmtpd(options, args)
vsmtpd.load_plugins()
try:
vsmtpd.start()
except KeyboardInterrupt:
vsmtpd.stop()
| gpl-3.0 | -6,894,904,686,943,122,000 | 30.180723 | 102 | 0.577409 | false |
zentralopensource/zentral | zentral/contrib/osquery/serializers.py | 1 | 5197 | from rest_framework import serializers
from zentral.contrib.inventory.models import EnrollmentSecret
from zentral.contrib.inventory.serializers import EnrollmentSecretSerializer
from .models import Configuration, Enrollment, Pack, Platform
class ConfigurationSerializer(serializers.ModelSerializer):
class Meta:
model = Configuration
fields = ("id", "name", "description",
"inventory", "inventory_apps", "inventory_interval",
"options",
"created_at", "updated_at")
class EnrollmentSerializer(serializers.ModelSerializer):
secret = EnrollmentSecretSerializer(many=False)
enrolled_machines_count = serializers.SerializerMethodField()
class Meta:
model = Enrollment
# TODO: distributor, maybe with a link ?
fields = ("id", "configuration", "enrolled_machines_count", "osquery_release", "secret", "version")
def get_enrolled_machines_count(self, obj):
return obj.enrolledmachine_set.count()
def create(self, validated_data):
secret_data = validated_data.pop('secret')
secret = EnrollmentSecret.objects.create(**secret_data)
enrollment = Enrollment.objects.create(secret=secret, **validated_data)
return enrollment
def update(self, instance, validated_data):
secret_serializer = self.fields["secret"]
secret_data = validated_data.pop('secret')
secret_serializer.update(instance.secret, secret_data)
return super().update(instance, validated_data)
# Standard Osquery packs
class OsqueryPlatformField(serializers.ListField):
def to_internal_value(self, data):
if data:
platforms = set(data.lower().split(","))
if platforms:
unknown_platforms = platforms - Platform.accepted_platforms()
if unknown_platforms:
raise serializers.ValidationError(
'Unknown platforms: {}'.format(", ".join(sorted(unknown_platforms)))
)
return sorted(platforms)
return []
class OsqueryQuerySerializer(serializers.Serializer):
query = serializers.CharField(allow_blank=False)
interval = serializers.IntegerField(min_value=1, max_value=604800)
removed = serializers.BooleanField(required=False)
snapshot = serializers.BooleanField(required=False)
platform = OsqueryPlatformField(required=False)
version = serializers.RegexField(r"^[0-9]{1,4}\.[0-9]{1,4}\.[0-9]{1,4}$", required=False)
shard = serializers.IntegerField(min_value=1, max_value=100, required=False)
denylist = serializers.BooleanField(default=True, required=False)
description = serializers.CharField(allow_blank=True, required=False)
value = serializers.CharField(allow_blank=False, required=False)
def validate(self, data):
snapshot = data.get("snapshot", False)
if snapshot and data.get("removed"):
raise serializers.ValidationError('{"action": "removed"} results are not available in "snapshot" mode')
return data
class OsqueryPackSerializer(serializers.Serializer):
name = serializers.CharField(max_length=256, required=False)
description = serializers.CharField(required=False)
discovery = serializers.ListField(child=serializers.CharField(allow_blank=False), allow_empty=True, required=False)
platform = OsqueryPlatformField(required=False)
version = serializers.RegexField(r"^[0-9]{1,4}\.[0-9]{1,4}\.[0-9]{1,4}$", required=False)
shard = serializers.IntegerField(min_value=1, max_value=100, required=False)
queries = serializers.DictField(child=OsqueryQuerySerializer(), allow_empty=False)
def get_pack_defaults(self, slug):
return {
"name": self.data.get("name", slug),
"description": self.data.get("description", ""),
"discovery_queries": self.data.get("discovery", []),
"shard": self.data.get("shard", None)
}
def iter_query_defaults(self, pack_slug):
pack_platforms = self.data.get("platform", [])
pack_minimum_osquery_version = self.data.get("version", None)
for query_slug, query_data in self.data["queries"].items():
pack_query_defaults = {
"slug": query_slug,
"interval": query_data["interval"],
"log_removed_actions": not query_data.get("snapshot", False) and query_data.get("removed", True),
"snapshot_mode": query_data.get("snapshot", False),
"shard": query_data.get("shard"),
"can_be_denylisted": query_data.get("can_be_denylisted", True),
}
query_defaults = {
"name": f"{pack_slug}{Pack.DELIMITER}{query_slug}",
"sql": query_data["query"],
"platforms": query_data.get("platform", pack_platforms),
"minimum_osquery_version": query_data.get("version", pack_minimum_osquery_version),
"description": query_data.get("description", ""),
"value": query_data.get("value", "")
}
yield query_slug, pack_query_defaults, query_defaults
| apache-2.0 | -9,080,164,344,115,751,000 | 44.587719 | 119 | 0.646527 | false |
GreatSCT/GreatSCT | lib/common/orchestra.py | 1 | 8604 | '''
This is the conductor which controls everything
'''
import glob
import imp
import os
import readline
import sys
from lib.common import completer
from lib.common import helpers
from lib.common import messages
class Conductor:
def __init__(self, cli_stuff):
# Create dictionaries of supported modules
# empty until stuff loaded into them
self.imported_tools = {}
self.load_tools(cli_stuff)
self.mainmenu_commands = {
"list": "List available tools",
"use": "Use a specific tool",
"info": "Information on a specific tool",
"update": "Update GreatSCT",
"exit": "Exit GreatSCT"}
self.number_of_tools = len(self.imported_tools)
self.command_line_options = cli_stuff
def command_line_use(self):
tool_found = False
for key, tool_object in sorted(self.imported_tools.items()):
# if the tool name supplied on the command line matches, use that tool
if self.command_line_options.tool.lower() == tool_object.cli_name.lower():
tool_object.cli_menu()
tool_found = True
if not tool_found:
print(helpers.color('Error: You did not provide a valid tool name!', warning=True))
print(helpers.color('Quitting GreatSCT...', warning=True))
sys.exit()
def list_tools(self):
# show title bar
messages.title_screen()
# Loop over all tools loaded into GreatSCT, print name and description
# Function for listing all payloads
tool_counter = 1
print(helpers.color(' [*] Available Tools:\n'))
for key, tool in sorted(self.imported_tools.items()):
print('\t' + str(tool_counter) + ")\t" + tool.cli_name)
tool_counter += 1
print()
return
def load_tools(self, command_line_object):
# Load all tools within the Tools folder
# (Evasion, Ordnance, Pillage, etc.)
for name in glob.glob('Tools/*/Tool.py'):
if name.endswith(".py") and ("__init__" not in name):
loaded_tool = imp.load_source(
name.replace("/", ".").rstrip('.py'), name)
self.imported_tools[name] = loaded_tool.Tools(
command_line_object)
return
def main_menu(self):
# default blank command for the main meny loop
main_menu_command = ''
show_header = True
# Try except to catch keyboard interrupts
try:
# Loop for the main menu, will always loop as long as command is ''
while main_menu_command == '':
comp = completer.GreatSCTMainMenuCompleter(self.mainmenu_commands, self.imported_tools)
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(comp.complete)
if show_header:
messages.title_screen()
print("Main Menu")
print("\n\t" + helpers.color(len(self.imported_tools)) + " tools loaded\n")
print("Available Commands:\n")
for command in sorted(self.mainmenu_commands.keys()):
print("\t" + helpers.color(command) + '\t\t\t' + self.mainmenu_commands[command])
print()
main_menu_command = input('Main menu choice: ').strip()
if main_menu_command.startswith('use'):
# Check to make sure a tool is provided with use command
if len(main_menu_command.split()) == 1:
# List tools, don't show header, loop back in main menu
self.list_tools()
show_header = False
main_menu_command = ''
elif len(main_menu_command.split()) == 2:
# Grab the command, either the number or word
tool_choice = main_menu_command.split()[1]
# if we're choosing the tool by number
if tool_choice.isdigit() and\
0 < int(tool_choice) <= len(self.imported_tools):
tool_number = 1
for key, tool_object in sorted(self.imported_tools.items()):
# if the entered number matches the tool, use that tool
if int(tool_choice) == tool_number:
tool_object.tool_main_menu()
tool_number += 1
show_header = True
else:
tool_number += 1
show_header = True
# Else if selecting the tool by name
else:
for key, tool_object in sorted(self.imported_tools.items()):
# if the entered name matches the tool, use that tool
if tool_choice.lower() == tool_object.cli_name.lower():
tool_object.tool_main_menu()
show_header = True
# Once done with tool, clear main menu command
main_menu_command = ''
show_header = True
# Catch anything else, like an error
else:
main_menu_command = ''
elif main_menu_command.startswith('list'):
# List tools, don't show header, loop back in main menu
self.list_tools()
show_header = False
main_menu_command = ''
elif main_menu_command.startswith('info'):
if len(main_menu_command.split()) == 1:
show_header = True
main_menu_command = ''
elif len(main_menu_command.split()) == 2:
# Grab the command, either the number or word
info_choice = main_menu_command.split()[1]
# if we're choosing the tool by number
if info_choice.isdigit() and\
0 < int(info_choice) <= len(self.imported_tools):
tool_number = 1
for key, tool_object in sorted(self.imported_tools.items()):
# if the entered number matches the tool, use that tool
if int(info_choice) == tool_number:
print()
print(helpers.color(tool_object.cli_name) + " => " + tool_object.description)
print()
show_header = False
tool_number += 1
# if the entered name matches the tool, use that tool
else:
for key, tool_object in sorted(self.imported_tools.items()):
if main_menu_command.split()[1].lower() == tool_object.cli_name.lower():
print()
print(helpers.color(tool_object.cli_name) + " => " + tool_object.description)
print()
show_header = False
main_menu_command = ''
else:
main_menu_command = ''
show_header = True
elif main_menu_command.startswith('update'):
self.update_greatsct()
main_menu_command = ''
elif main_menu_command.startswith('exit'):
print('\n' + helpers.color('You rage quit GreatSCT!', warning=True) + '\n')
sys.exit()
else:
show_header = True
main_menu_command = ''
except KeyboardInterrupt:
print("\n\n" + helpers.color("You rage quit GreatSCT!", warning=True))
sys.exit()
def update_greatsct(self):
os.system('git pull')
input('GreatSCT has checked for updates, press enter to continue')
return
| gpl-3.0 | 3,642,661,304,942,000,600 | 40.565217 | 113 | 0.477569 | false |
cmancone/mygrations | tests/formats/mysql/file_reader/parsers/test_type_plain.py | 1 | 2282 | import unittest
from mygrations.formats.mysql.file_reader.parsers.type_plain import type_plain
class test_type_plain(unittest.TestCase):
def test_simple(self):
# parse a typical column definition
parser = type_plain()
returned = parser.parse("created date NOT NULL DEFAULT 'bob',")
self.assertTrue(parser.matched)
self.assertEquals('', returned)
self.assertEquals('column', parser.definition_type)
self.assertEquals('created', parser.name)
self.assertEquals('DATE', parser.column_type)
self.assertFalse(parser.null)
self.assertEquals('bob', parser.default)
self.assertTrue(parser.has_comma)
self.assertEquals(0, len(parser.errors))
self.assertEquals(0, len(parser.warnings))
self.assertEquals("`created` DATE NOT NULL DEFAULT 'bob'", str(parser))
def test_optional_default(self):
# parse a column definition without a default
parser = type_plain()
returned = parser.parse("created date")
self.assertTrue(parser.matched)
self.assertEquals('', returned)
self.assertEquals('column', parser.definition_type)
self.assertEquals('created', parser.name)
self.assertEquals('DATE', parser.column_type)
self.assertTrue(parser.null)
self.assertEquals(None, parser.default)
self.assertFalse(parser.has_comma)
self.assertEquals(0, len(parser.errors))
self.assertEquals(0, len(parser.warnings))
self.assertEquals("`created` DATE", str(parser))
def test_strip_backticks(self):
# parse a column definition with backticks around the name
parser = type_plain()
returned = parser.parse("`created` date")
self.assertTrue(parser.matched)
self.assertEquals('', returned)
self.assertEquals('column', parser.definition_type)
self.assertEquals('created', parser.name)
self.assertEquals("`created` DATE", str(parser))
def test_not_null_needs_default(self):
# parse a NOT NULL column definition without a default
parser = type_plain()
returned = parser.parse("created date NOT NULL")
self.assertTrue(parser.matched)
self.assertEquals('', returned)
self.assertTrue('is not null and has no default' in parser.warnings[0])
| mit | -526,332,220,805,078,900 | 35.222222 | 79 | 0.656004 | false |
WojciechMigda/TCO-PCFStupskiPrize1 | src/toyclf.py | 1 | 3538 | #!/opt/anaconda2/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
################################################################################
#
# Copyright (c) 2015 Wojciech Migda
# All rights reserved
# Distributed under the terms of the MIT license
#
################################################################################
#
# Filename: toyclf.py
#
# Description:
# Toy code / sandbox
#
# Authors:
# Wojciech Migda
#
################################################################################
#
# History:
# --------
# Date Who Ticket Description
# ---------- --- --------- ------------------------------------------------
# 2015-12-19 wm Initial version
#
################################################################################
"""
from sys import path
path.insert(0, './Pipe')
#from pipe import *
import pipe as P
from pipelib import as_csv_rows
def main():
y_train = (
"../../data/training.csv"
| as_csv_rows
| P.select(lambda x: float(x[1]))
| P.as_list
)
X_labels = (
"../../data/training.csv"
| as_csv_rows
| P.select(lambda x: x[0])
| P.as_list
)
X_train = (
"lbp24_np50000_train.csv"
| as_csv_rows
| P.select(lambda l: [float(x) for x in l])
| P.as_list
)
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import ExtraTreesRegressor
SEED = 1
NEST = 5000
clf = ExtraTreesRegressor(verbose=0, n_estimators=NEST, random_state=SEED)
clf.fit(X_train[:-20], y_train[:-20])
print(zip(clf.predict(X_train[-20:]), y_train[-20:]))
print(mean_squared_error(clf.predict(X_train[-20:]), y_train[-20:]))
clf.fit(X_train[20:], y_train[20:])
print(zip(clf.predict(X_train[:20]), y_train[:20]))
print(mean_squared_error(clf.predict(X_train[:20]), y_train[:20]))
"""
from sklearn.ensemble import AdaBoostRegressor
clf = AdaBoostRegressor(base_estimator=ExtraTreesRegressor(verbose=0, n_estimators=NEST, random_state=SEED), random_state=1)
clf.fit(X_train[20:], y_train[20:])
print(zip(clf.predict(X_train[:20]), y_train[:20]))
print(mean_squared_error(clf.predict(X_train[:20]), y_train[:20]))
"""
"""
from sklearn.neighbors import KNeighborsRegressor
def kullback_leibler_divergence(p, q):
import numpy as np
p = np.asarray(p)
q = np.asarray(q)
filt = np.logical_and(p != 0, q != 0)
return np.sum(p[filt] * np.log2(p[filt] / q[filt]))
#clf = KNeighborsRegressor(n_neighbors=1, metric='pyfunc', func=kullback_leibler_divergence)
clf = KNeighborsRegressor(n_neighbors=5)
X_train = [r[:-1] for r in X_train]
clf.fit(X_train[20:], y_train[20:])
print(zip(clf.predict(X_train[:20]), y_train[:20]))
"""
"""
def score_gen():
from sklearn.cross_validation import KFold
from numpy import array
kf = KFold(len(y_train), n_folds=10)
for itrain, itest in kf:
ytrain = array(y_train)[itrain]
Xtrain = array(X_train)[itrain]
ytest = array(y_train)[itest]
Xtest = array(X_train)[itest]
clf.fit(Xtrain, ytrain)
result = clf.score(Xtest, ytest) / len(kf)
print(result)
yield result
CVscore = sum(score_gen())
print(CVscore)
"""
pass
if __name__ == "__main__":
main()
pass
| mit | 8,233,582,854,996,139,000 | 27.532258 | 128 | 0.512154 | false |
zultron/virt-manager | virtManager/domain.py | 1 | 61912 | #
# Copyright (C) 2006, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
# pylint: disable=E0611
from gi.repository import GObject
# pylint: enable=E0611
import logging
import os
import time
import threading
import libvirt
from virtinst import DomainSnapshot
from virtinst import Guest
from virtinst import util
from virtinst import VirtualController
from virtManager import uihelpers
from virtManager.libvirtobject import vmmLibvirtObject
def compare_device(origdev, newdev, idx):
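# Properties used to decide whether two devices of the same type refer to
# the same device; 'vmmindex' is matched against the supplied idx.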
devprops = {
"disk" : ["target", "bus"],
"interface" : ["macaddr", "vmmindex"],
"input" : ["bus", "type", "vmmindex"],
"sound" : ["model", "vmmindex"],
"video" : ["model", "vmmindex"],
"watchdog" : ["vmmindex"],
"hostdev" : ["type", "managed", "vmmindex",
"product", "vendor",
"function", "domain", "slot"],
"serial" : ["type", "target_port"],
"parallel" : ["type", "target_port"],
"console" : ["type", "target_type", "target_port"],
"graphics" : ["type", "vmmindex"],
"controller" : ["type", "index"],
"channel" : ["type", "target_name"],
"filesystem" : ["target" , "vmmindex"],
"smartcard" : ["mode" , "vmmindex"],
"redirdev" : ["bus" , "type", "vmmindex"],
"tpm" : ["type" , "vmmindex"],
"rng" : ["type" , "vmmindex"],
}
if id(origdev) == id(newdev):
return True
if type(origdev) is not type(newdev):
return False
for devprop in devprops[origdev.virtual_device_type]:
origval = getattr(origdev, devprop)
if devprop == "vmmindex":
newval = idx
else:
newval = getattr(newdev, devprop)
if origval != newval:
return False
return True
def find_device(guest, origdev):
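# Return the device in 'guest' that matches 'origdev', or None if no match is found.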
devlist = guest.get_devices(origdev.virtual_device_type)
for idx in range(len(devlist)):
dev = devlist[idx]
if compare_device(origdev, dev, idx):
return dev
return None
def start_job_progress_thread(vm, meter, progtext):
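# Poll the domain's job info from a background thread and feed the totals
# into 'meter' so long-running jobs report progress.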
current_thread = threading.currentThread()
def jobinfo_cb():
while True:
time.sleep(.5)
if not current_thread.isAlive():
return False
try:
jobinfo = vm.job_info()
data_total = float(jobinfo[3])
# data_processed = float(jobinfo[4])
data_remaining = float(jobinfo[5])
# data_total is 0 if the job hasn't started yet
if not data_total:
continue
if not meter.started:
meter.start(size=data_total,
text=progtext)
progress = data_total - data_remaining
meter.update(progress)
except:
logging.exception("Error calling jobinfo")
return False
return True
if vm.getjobinfo_supported:
t = threading.Thread(target=jobinfo_cb,
name="job progress reporting",
args=())
t.daemon = True
t.start()
class vmmInspectionData(object):
def __init__(self):
self.type = None
self.distro = None
self.major_version = None
self.minor_version = None
self.hostname = None
self.product_name = None
self.product_variant = None
self.icon = None
self.applications = None
self.error = False
class vmmDomainSnapshot(vmmLibvirtObject):
"""
Class wrapping a virDomainSnapshot object
"""
def __init__(self, conn, backend):
vmmLibvirtObject.__init__(self, conn, backend, backend.getName(),
DomainSnapshot)
self.refresh_xml()
def get_name(self):
return self.get_xmlobj().name
def _XMLDesc(self, flags):
return self._backend.getXMLDesc(flags=flags)
def delete(self, force=True):
ignore = force
self._backend.delete()
def run_status(self):
status = DomainSnapshot.state_str_to_int(self.get_xmlobj().state)
return vmmDomain.pretty_run_status(status)
def run_status_icon_name(self):
status = DomainSnapshot.state_str_to_int(self.get_xmlobj().state)
if status not in uihelpers.vm_status_icons:
logging.debug("Unknown status %d, using NOSTATE", status)
status = libvirt.VIR_DOMAIN_NOSTATE
return uihelpers.vm_status_icons[status]
def is_external(self):
if self.get_xmlobj().memory_type == "external":
return True
for disk in self.get_xmlobj().disks:
if disk.snapshot == "external":
return True
return False
class vmmDomain(vmmLibvirtObject):
"""
Class wrapping virDomain libvirt objects. Is also extended to be
backed by a virtinst.Guest object for new VM 'customize before install'
"""
__gsignals__ = {
"status-changed": (GObject.SignalFlags.RUN_FIRST, None, [int, int]),
"resources-sampled": (GObject.SignalFlags.RUN_FIRST, None, []),
"inspection-changed": (GObject.SignalFlags.RUN_FIRST, None, []),
"pre-startup": (GObject.SignalFlags.RUN_FIRST, None, [object]),
}
@staticmethod
def pretty_run_status(status, has_saved=False):
if status == libvirt.VIR_DOMAIN_RUNNING:
return _("Running")
elif status == libvirt.VIR_DOMAIN_PAUSED:
return _("Paused")
elif status == libvirt.VIR_DOMAIN_SHUTDOWN:
return _("Shutting Down")
elif status == libvirt.VIR_DOMAIN_SHUTOFF:
if has_saved:
return _("Saved")
else:
return _("Shutoff")
elif status == libvirt.VIR_DOMAIN_CRASHED:
return _("Crashed")
elif (hasattr(libvirt, "VIR_DOMAIN_PMSUSPENDED") and
status == libvirt.VIR_DOMAIN_PMSUSPENDED):
return _("Suspended")
logging.debug("Unknown status %d, returning 'Unknown'", status)
return _("Unknown")
def __init__(self, conn, backend, key):
vmmLibvirtObject.__init__(self, conn, backend, key, Guest)
self.uuid = key
self.cloning = False
self.record = []
self.maxRecord = {
"diskRdRate" : 10.0,
"diskWrRate" : 10.0,
"netTxRate" : 10.0,
"netRxRate" : 10.0,
}
self._install_abort = False
self.reboot_listener = None
self._startup_vcpus = None
self._is_management_domain = None
self._id = None
self._name = None
self._snapshot_list = None
self.lastStatus = libvirt.VIR_DOMAIN_SHUTOFF
self.managedsave_supported = False
self.remote_console_supported = False
self.title_supported = False
self._enable_net_poll = False
self._stats_net_supported = True
self._stats_net_skip = []
self._enable_disk_poll = False
self._stats_disk_supported = True
self._stats_disk_skip = []
self.inspection = vmmInspectionData()
if isinstance(self._backend, Guest):
return
self._libvirt_init()
def _cleanup(self):
for snap in self._snapshot_list or []:
snap.cleanup()
self._snapshot_list = None
def _libvirt_init(self):
"""
Initialization to do if backed by a libvirt virDomain
"""
self.managedsave_supported = self.conn.check_support(
self.conn.SUPPORT_DOMAIN_MANAGED_SAVE, self._backend)
self.remote_console_supported = self.conn.check_support(
self.conn.SUPPORT_DOMAIN_CONSOLE_STREAM, self._backend)
self.title_supported = self.conn.check_support(
self.conn.SUPPORT_DOMAIN_GET_METADATA, self._backend)
# Determine available XML flags (older libvirt versions will error
# out if passed SECURE_XML, INACTIVE_XML, etc)
(self._inactive_xml_flags,
self._active_xml_flags) = self.conn.get_dom_flags(self._backend)
self.toggle_sample_network_traffic()
self.toggle_sample_disk_io()
self.force_update_status()
# Hook up listeners that need to be cleaned up
self.add_gconf_handle(
self.config.on_stats_enable_net_poll_changed(
self.toggle_sample_network_traffic))
self.add_gconf_handle(
self.config.on_stats_enable_disk_poll_changed(
self.toggle_sample_disk_io))
self.connect("status-changed", self._update_start_vcpus)
self.connect("pre-startup", self._prestartup_nodedev_check)
def _prestartup_nodedev_check(self, src, ret):
ignore = src
error = _("There is more than one '%s' device attached to "
"your host, and we can't determine which one to "
"use for your guest.\n"
"To fix this, remove and reattach the USB device "
"to your guest using the 'Add Hardware' wizard.")
for hostdev in self.get_hostdev_devices():
devtype = hostdev.type
if devtype != "usb":
continue
vendor = hostdev.vendor
product = hostdev.product
bus = hostdev.bus
device = hostdev.device
if vendor and product:
count = self.conn.get_nodedevs_number("usb_device",
vendor,
product)
if count > 1 and not (bus and device):
prettyname = "%s %s" % (vendor, product)
ret.append(error % prettyname)
###########################
# Misc API getter methods #
###########################
def get_name(self):
if self._name is None:
self._name = self._backend.name()
return self._name
def get_name_or_title(self):
title = self.get_title()
if title:
return title
return self.get_name()
def get_title(self):
return self.get_xmlobj(inactive=True).title
def get_id(self):
if self._id is None:
self._id = self._backend.ID()
return self._id
def status(self):
return self.lastStatus
def get_cloning(self):
return self.cloning
def set_cloning(self, val):
self.cloning = bool(val)
# If manual shutdown or destroy specified, make sure we don't continue
# install process
def get_install_abort(self):
return bool(self._install_abort)
def rhel6_defaults(self):
return self.conn.rhel6_defaults(self.get_emulator())
def is_read_only(self):
if self.is_management_domain():
return True
return False
def is_management_domain(self):
if self._is_management_domain is None:
self._is_management_domain = (self.get_id() == 0)
return self._is_management_domain
def has_spicevmc_type_redirdev(self):
devs = self.get_redirdev_devices()
for dev in devs:
if dev.type == "spicevmc":
return True
return False
def get_id_pretty(self):
i = self.get_id()
if i < 0:
return "-"
return str(i)
##################
# Support checks #
##################
def _get_getvcpus_supported(self):
return self.conn.check_support(
self.conn.SUPPORT_DOMAIN_GETVCPUS, self._backend)
getvcpus_supported = property(_get_getvcpus_supported)
def _get_getjobinfo_supported(self):
return self.conn.check_support(
self.conn.SUPPORT_DOMAIN_JOB_INFO, self._backend)
getjobinfo_supported = property(_get_getjobinfo_supported)
def snapshots_supported(self):
if not self.conn.check_support(
self.conn.SUPPORT_DOMAIN_LIST_SNAPSHOTS, self._backend):
return _("Libvirt connection does not support snapshots.")
if self.list_snapshots():
return
# Check if our disks are all qcow2
seen_qcow2 = False
for disk in self.get_disk_devices(refresh_if_nec=False):
if disk.read_only:
continue
if not disk.path:
continue
if disk.driver_type == "qcow2":
seen_qcow2 = True
continue
return _("Snapshots are only supported if all writeable disks "
"images allocated to the guest are qcow2 format.")
if not seen_qcow2:
return _("Snapshots require at least one writeable qcow2 disk "
"image allocated to the guest.")
#############################
# Internal XML handling API #
#############################
def _invalidate_xml(self):
vmmLibvirtObject._invalidate_xml(self)
self._name = None
self._id = None
def _redefine_device(self, cb, origdev):
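# Apply the change callback 'cb' to the copy of 'origdev' found in the
# inactive (to-be-defined) guest XML.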
defguest = self._get_xmlobj_to_define()
dev = find_device(defguest, origdev)
if dev:
return cb(dev)
# If we are removing multiple dev from an active VM, a double
# attempt may result in a lookup failure. If device is present
# in the active XML, assume all is good.
if find_device(self.get_xmlobj(), origdev):
logging.debug("Device in active config but not inactive config.")
return
raise RuntimeError(_("Could not find specified device in the "
"inactive VM configuration: %s") % repr(origdev))
##############################
# Persistent XML change APIs #
##############################
def define_name(self, newname):
return self._define_name_helper("domain",
self.conn.rename_vm,
newname)
# Device Add/Remove
def add_device(self, devobj):
"""
Redefine guest with appended device XML 'devxml'
"""
def change(guest):
guest.add_device(devobj)
ret = self._redefine(change)
self.redefine_cached()
return ret
def remove_device(self, devobj):
"""
Remove passed device from the inactive guest XML
"""
# HACK: If serial and console are both present, they both need
# to be removed at the same time
con = None
if hasattr(devobj, "virtmanager_console_dup"):
con = getattr(devobj, "virtmanager_console_dup")
def change(guest):
def rmdev(editdev):
if con:
rmcon = find_device(guest, con)
if rmcon:
guest.remove_device(rmcon)
guest.remove_device(editdev)
return self._redefine_device(rmdev, devobj)
ret = self._redefine(change)
self.redefine_cached()
return ret
# CPU define methods
def define_vcpus(self, vcpus, maxvcpus):
def change(guest):
guest.curvcpus = int(vcpus)
guest.vcpus = int(maxvcpus)
return self._redefine(change)
def define_cpuset(self, cpuset):
def change(guest):
guest.cpuset = cpuset
return self._redefine(change)
def define_cpu_topology(self, sockets, cores, threads):
def change(guest):
cpu = guest.cpu
cpu.sockets = sockets
cpu.cores = cores
cpu.threads = threads
return self._redefine(change)
def define_cpu(self, model, vendor, from_host, featurelist):
def change(guest):
if from_host:
guest.cpu.copy_host_cpu()
elif guest.cpu.model != model:
# Since we don't expose this in the UI, have host value trump
# caps value
guest.cpu.vendor = vendor
guest.cpu.model = model or None
if guest.cpu.model is None:
for f in guest.cpu.features:
guest.cpu.remove_feature(f)
return
origfeatures = guest.cpu.features
def set_feature(fname, fpol):
for f in origfeatures:
if f.name != fname:
continue
if f.policy != fpol:
if fpol == "default":
guest.cpu.remove_feature(f)
else:
f.policy = fpol
return
if fpol != "default":
guest.cpu.add_feature(fname, fpol)
# Sync feature lists
for fname, fpol in featurelist:
set_feature(fname, fpol)
return self._redefine(change)
# Mem define methods
def define_both_mem(self, memory, maxmem):
def change(guest):
guest.memory = int(memory)
guest.maxmemory = int(maxmem)
return self._redefine(change)
# Security define methods
def define_seclabel(self, model, t, label, relabel):
def change(guest):
seclabel = guest.seclabel
seclabel.model = model or None
if not model:
return
if relabel is not None:
if relabel:
seclabel.relabel = "yes"
else:
seclabel.relabel = "no"
seclabel.type = t
if label:
seclabel.label = label
return self._redefine(change)
# Machine config define methods
def define_acpi(self, newvalue):
def change(guest):
guest.features.acpi = newvalue
return self._redefine(change)
def define_apic(self, newvalue):
def change(guest):
guest.features.apic = newvalue
return self._redefine(change)
def define_clock(self, newvalue):
def change(guest):
guest.clock.offset = newvalue
return self._redefine(change)
def define_machtype(self, newvalue):
def change(guest):
guest.os.machine = newvalue
return self._redefine(change)
def define_description(self, newvalue):
def change(guest):
guest.description = newvalue or None
return self._redefine(change)
def define_title(self, newvalue):
def change(guest):
guest.title = newvalue or None
return self._redefine(change)
# Boot define methods
def set_boot_device(self, boot_list):
def change(guest):
guest.os.bootorder = boot_list
return self._redefine(change)
def set_boot_menu(self, newval):
def change(guest):
guest.os.enable_bootmenu = bool(newval)
return self._redefine(change)
def set_boot_kernel(self, kernel, initrd, dtb, args):
def change(guest):
guest.os.kernel = kernel or None
guest.os.initrd = initrd or None
guest.os.dtb = dtb or None
guest.os.kernel_args = args or None
return self._redefine(change)
def set_boot_init(self, init):
def change(guest):
guest.os.init = init
return self._redefine(change)
# Disk define methods
def define_storage_media(self, devobj, newpath):
def change(editdev):
editdev.path = newpath
editdev.sync_path_props()
return self._redefine_device(change, devobj)
def define_disk_readonly(self, devobj, do_readonly):
def change(editdev):
editdev.read_only = do_readonly
return self._redefine_device(change, devobj)
def define_disk_shareable(self, devobj, do_shareable):
def change(editdev):
editdev.shareable = do_shareable
return self._redefine_device(change, devobj)
def define_disk_removable(self, devobj, do_removable):
def change(editdev):
editdev.removable = do_removable
return self._redefine_device(change, devobj)
def define_disk_cache(self, devobj, new_cache):
def change(editdev):
editdev.driver_cache = new_cache or None
return self._redefine_device(change, devobj)
def define_disk_io(self, devobj, val):
def change(editdev):
editdev.driver_io = val or None
return self._redefine_device(change, devobj)
def define_disk_driver_type(self, devobj, new_driver_type):
def change(editdev):
editdev.driver_type = new_driver_type or None
return self._redefine_device(change, devobj)
def define_disk_bus(self, devobj, newval, addr):
def change(editdev):
oldprefix = editdev.get_target_prefix()[0]
oldbus = editdev.bus
editdev.bus = newval
if oldbus == newval:
return
editdev.address.clear()
editdev.address.set_addrstr(addr)
if oldprefix == editdev.get_target_prefix()[0]:
return
used = []
disks = (self.get_disk_devices() +
self.get_disk_devices(inactive=True))
for d in disks:
used.append(d.target)
if editdev.target:
used.remove(editdev.target)
editdev.target = None
editdev.generate_target(used)
return self._redefine_device(change, devobj)
def define_disk_serial(self, devobj, val):
def change(editdev):
if val != editdev.serial:
editdev.serial = val or None
return self._redefine_device(change, devobj)
def define_disk_iotune_rbs(self, devobj, val):
def change(editdev):
editdev.iotune_rbs = val
return self._redefine_device(change, devobj)
def define_disk_iotune_ris(self, devobj, val):
def change(editdev):
editdev.iotune_ris = val
return self._redefine_device(change, devobj)
def define_disk_iotune_tbs(self, devobj, val):
def change(editdev):
editdev.iotune_tbs = val
return self._redefine_device(change, devobj)
def define_disk_iotune_tis(self, devobj, val):
def change(editdev):
editdev.iotune_tis = val
return self._redefine_device(change, devobj)
def define_disk_iotune_wbs(self, devobj, val):
def change(editdev):
editdev.iotune_wbs = val
return self._redefine_device(change, devobj)
def define_disk_iotune_wis(self, devobj, val):
def change(editdev):
editdev.iotune_wis = val
return self._redefine_device(change, devobj)
# Network define methods
def define_network_source(self, devobj, newtype, newsource, newmode):
def change(editdev):
if not newtype:
return
editdev.source = None
editdev.type = newtype
editdev.source = newsource
editdev.source_mode = newmode or None
return self._redefine_device(change, devobj)
def define_network_model(self, devobj, newmodel, addr):
def change(editdev):
if editdev.model != newmodel:
editdev.address.clear()
editdev.address.set_addrstr(addr)
editdev.model = newmodel
return self._redefine_device(change, devobj)
def define_virtualport(self, devobj, newtype, newmanagerid,
newtypeid, newtypeidversion, newinstanceid):
def change(editdev):
editdev.virtualport.type = newtype or None
editdev.virtualport.managerid = newmanagerid or None
editdev.virtualport.typeid = newtypeid or None
editdev.virtualport.typeidversion = newtypeidversion or None
editdev.virtualport.instanceid = newinstanceid or None
return self._redefine_device(change, devobj)
# Graphics define methods
def define_graphics_password(self, devobj, newval):
def change(editdev):
editdev.passwd = newval or None
return self._redefine_device(change, devobj)
def define_graphics_keymap(self, devobj, newval):
def change(editdev):
editdev.keymap = newval
return self._redefine_device(change, devobj)
def define_graphics_type(self, devobj, newval):
def change(editdev):
editdev.type = newval
return self._redefine_device(change, devobj)
# Sound define methods
def define_sound_model(self, devobj, newmodel):
def change(editdev):
if editdev.model != newmodel:
editdev.address.clear()
editdev.model = newmodel
return self._redefine_device(change, devobj)
# Video define methods
def define_video_model(self, devobj, newmodel):
def change(editdev):
if newmodel == editdev.model:
return
editdev.model = newmodel
editdev.address.clear()
# Clear out heads/ram values so they reset to default. If
# we ever allow editing these values in the UI we should
# drop this
editdev.vram = None
editdev.heads = None
editdev.ram = None
return self._redefine_device(change, devobj)
# Watchdog define methods
def define_watchdog_model(self, devobj, newval):
def change(editdev):
if editdev.model != newval:
editdev.address.clear()
editdev.model = newval
return self._redefine_device(change, devobj)
def define_watchdog_action(self, devobj, newval):
def change(editdev):
editdev.action = newval
return self._redefine_device(change, devobj)
# Smartcard define methods
def define_smartcard_mode(self, devobj, newmodel):
def change(editdev):
editdev.mode = newmodel
editdev.type = editdev.TYPE_DEFAULT
return self._redefine_device(change, devobj)
# Controller define methods
def define_controller_model(self, devobj, newmodel):
def change(editdev):
ignore = editdev
guest = self._get_xmlobj_to_define()
ctrls = guest.get_devices("controller")
ctrls = [x for x in ctrls if (x.type ==
VirtualController.TYPE_USB)]
for dev in ctrls:
guest.remove_device(dev)
if newmodel == "ich9-ehci1":
for dev in VirtualController.get_usb2_controllers(
guest.conn):
guest.add_device(dev)
else:
dev = VirtualController(guest.conn)
dev.type = "usb"
if newmodel != "default":
dev.model = newmodel
guest.add_device(dev)
return self._redefine_device(change, devobj)
####################
# Hotplug routines #
####################
def attach_device(self, devobj):
"""
Hotplug device to running guest
"""
if not self.is_active():
return
devxml = devobj.get_xml_config()
self._backend.attachDevice(devxml)
def detach_device(self, devobj):
"""
Hotunplug device from running guest
"""
if not self.is_active():
return
xml = devobj.get_xml_config()
self._backend.detachDevice(xml)
def update_device(self, devobj, flags=1):
if not self.is_active():
return
# Default flag is VIR_DOMAIN_DEVICE_MODIFY_LIVE
xml = devobj.get_xml_config()
self._backend.updateDeviceFlags(xml, flags)
def hotplug_vcpus(self, vcpus):
vcpus = int(vcpus)
if vcpus != self.vcpu_count():
self._backend.setVcpus(vcpus)
def hotplug_memory(self, memory):
if memory != self.get_memory():
self._backend.setMemory(memory)
def hotplug_maxmem(self, maxmem):
if maxmem != self.maximum_memory():
self._backend.setMaxMemory(maxmem)
def hotplug_both_mem(self, memory, maxmem):
logging.info("Hotplugging curmem=%s maxmem=%s for VM '%s'",
memory, maxmem, self.get_name())
if self.is_active():
actual_cur = self.get_memory()
if memory:
if maxmem < actual_cur:
# Set current first to avoid error
self.hotplug_memory(memory)
self.hotplug_maxmem(maxmem)
else:
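                    # Raise maxmem first: the requested current value
                    # may exceed the old maximum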
self.hotplug_maxmem(maxmem)
self.hotplug_memory(memory)
else:
self.hotplug_maxmem(maxmem)
def hotplug_storage_media(self, devobj, newpath):
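        # Re-attaching an existing device with a new path swaps the
        # media (e.g. CDROM/floppy) in the running guest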
devobj.path = newpath
self.attach_device(devobj)
def hotplug_graphics_password(self, devobj, newval):
devobj.passwd = newval
self.update_device(devobj)
def hotplug_description(self, desc):
        # We already fake hotplug-like behavior by reading the
        # description from the inactive XML of a running VM.
        #
        # libvirt since 0.9.10 provides a SetMetadata API that performs
        # an actual <description> 'hotplug'; using it means checking
        # for support, version, etc.
if not self.conn.check_support(
self.conn.SUPPORT_DOMAIN_SET_METADATA, self._backend):
return
flags = (libvirt.VIR_DOMAIN_AFFECT_LIVE |
libvirt.VIR_DOMAIN_AFFECT_CONFIG)
self._backend.setMetadata(
libvirt.VIR_DOMAIN_METADATA_DESCRIPTION,
desc, None, None, flags)
def hotplug_title(self, title):
if not self.conn.check_support(
self.conn.SUPPORT_DOMAIN_SET_METADATA, self._backend):
return
flags = (libvirt.VIR_DOMAIN_AFFECT_LIVE |
libvirt.VIR_DOMAIN_AFFECT_CONFIG)
self._backend.setMetadata(
libvirt.VIR_DOMAIN_METADATA_TITLE,
title, None, None, flags)
########################
# Libvirt API wrappers #
########################
def _define(self, newxml):
self.conn.define_domain(newxml)
def _XMLDesc(self, flags):
return self._backend.XMLDesc(flags)
def pin_vcpu(self, vcpu_num, pinlist):
self._backend.pinVcpu(vcpu_num, pinlist)
def vcpu_info(self):
if self.is_active() and self.getvcpus_supported:
return self._backend.vcpus()
return [[], []]
def get_autostart(self):
return self._backend.autostart()
def set_autostart(self, val):
if self.get_autostart() == val:
return
self._backend.setAutostart(val)
def job_info(self):
return self._backend.jobInfo()
def abort_job(self):
self._backend.abortJob()
def open_console(self, devname, stream, flags=0):
return self._backend.openConsole(devname, stream, flags)
def refresh_snapshots(self):
self._snapshot_list = None
def list_snapshots(self):
if self._snapshot_list is None:
newlist = []
for rawsnap in self._backend.listAllSnapshots():
newlist.append(vmmDomainSnapshot(self.conn, rawsnap))
self._snapshot_list = newlist
return self._snapshot_list[:]
def revert_to_snapshot(self, snap):
self._backend.revertToSnapshot(snap.get_backend())
self.idle_add(self.force_update_status)
def create_snapshot(self, xml, redefine=False):
flags = 0
if redefine:
flags = (flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE)
if not redefine:
logging.debug("Creating snapshot flags=%s xml=\n%s", flags, xml)
self._backend.snapshotCreateXML(xml, flags)
########################
# XML Parsing routines #
########################
def is_container(self):
return self.get_xmlobj().os.is_container()
def is_xenpv(self):
return self.get_xmlobj().os.is_xenpv()
def is_hvm(self):
return self.get_xmlobj().os.is_hvm()
def get_uuid(self):
return self.uuid
def get_abi_type(self):
return self.get_xmlobj().os.os_type
def get_hv_type(self):
return self.get_xmlobj().type
def get_pretty_hv_type(self):
return uihelpers.pretty_hv(self.get_abi_type(), self.get_hv_type())
def get_arch(self):
return self.get_xmlobj().os.arch
def get_init(self):
return self.get_xmlobj().os.init
def get_emulator(self):
return self.get_xmlobj().emulator
def get_acpi(self):
return self.get_xmlobj().features.acpi
def get_apic(self):
return self.get_xmlobj().features.apic
def get_clock(self):
return self.get_xmlobj().clock.offset
def get_machtype(self):
return self.get_xmlobj().os.machine
def get_description(self):
        # Always show the inactive <description>; this lets us fake hotplug
        # for a field that is strictly metadata
return self.get_xmlobj(inactive=True).description
def get_memory(self):
return int(self.get_xmlobj().memory)
def maximum_memory(self):
return int(self.get_xmlobj().maxmemory)
def vcpu_count(self):
guest = self.get_xmlobj()
return int(guest.curvcpus or
self._startup_vcpus or
guest.vcpus)
def vcpu_max_count(self):
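        # If the XML carries no separate current/max split and the guest
        # is running, the effective maximum is the vcpu count the guest
        # was started with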
guest = self.get_xmlobj()
has_xml_max = (guest.curvcpus != guest.vcpus)
if has_xml_max or not self.is_active():
return guest.vcpus
if self._startup_vcpus is None:
self._startup_vcpus = int(self.vcpu_count())
return int(self._startup_vcpus)
def vcpu_pinning(self):
return self.get_xmlobj().cpuset or ""
def get_cpu_config(self):
return self.get_xmlobj().cpu
def get_boot_device(self):
return self.get_xmlobj().os.bootorder
def get_boot_menu(self):
guest = self.get_xmlobj()
return bool(guest.os.enable_bootmenu)
def get_boot_kernel_info(self):
guest = self.get_xmlobj()
return (guest.os.kernel, guest.os.initrd,
guest.os.dtb, guest.os.kernel_args)
def get_seclabel(self):
seclabel = self.get_xmlobj().seclabel
model = seclabel.model
t = seclabel.type or "dynamic"
label = seclabel.label or ""
relabel = getattr(seclabel, "relabel", None)
if relabel is not None:
if relabel == "yes":
relabel = True
else:
relabel = False
return [model, t, label, relabel]
# XML Device listing
def get_serial_devs(self):
devs = self.get_char_devices()
devlist = []
devlist += [x for x in devs if x.virtual_device_type == "serial"]
devlist += [x for x in devs if x.virtual_device_type == "console"]
return devlist
def _build_device_list(self, device_type,
refresh_if_nec=True, inactive=False):
guest = self.get_xmlobj(refresh_if_nec=refresh_if_nec,
inactive=inactive)
devs = guest.get_devices(device_type)
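        # Tag each device with its list position so callers can refer
        # back to a specific device instance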
count = 0
for dev in devs:
dev.vmmindex = count
count += 1
return devs
def get_network_devices(self, refresh_if_nec=True):
return self._build_device_list("interface", refresh_if_nec)
def get_video_devices(self):
return self._build_device_list("video")
def get_hostdev_devices(self):
return self._build_device_list("hostdev")
def get_watchdog_devices(self):
return self._build_device_list("watchdog")
def get_input_devices(self):
return self._build_device_list("input")
def get_graphics_devices(self):
return self._build_device_list("graphics")
def get_sound_devices(self):
return self._build_device_list("sound")
def get_controller_devices(self):
return self._build_device_list("controller")
def get_filesystem_devices(self):
return self._build_device_list("filesystem")
def get_smartcard_devices(self):
return self._build_device_list("smartcard")
def get_redirdev_devices(self):
return self._build_device_list("redirdev")
def get_tpm_devices(self):
return self._build_device_list("tpm")
def get_rng_devices(self):
return self._build_device_list("rng")
def get_disk_devices(self, refresh_if_nec=True, inactive=False):
devs = self._build_device_list("disk", refresh_if_nec, inactive)
# Iterate through all disks and calculate what number they are
# HACK: We are making a variable in VirtualDisk to store the index
idx_mapping = {}
for dev in devs:
devtype = dev.device
bus = dev.bus
key = devtype + (bus or "")
if key not in idx_mapping:
idx_mapping[key] = 1
dev.disk_bus_index = idx_mapping[key]
idx_mapping[key] += 1
return devs
def get_char_devices(self):
devs = []
serials = self._build_device_list("serial")
parallels = self._build_device_list("parallel")
consoles = self._build_device_list("console")
channels = self._build_device_list("channel")
for devicelist in [serials, parallels, consoles, channels]:
devs.extend(devicelist)
# Don't display <console> if it's just a duplicate of <serial>
if (len(consoles) > 0 and len(serials) > 0):
con = consoles[0]
ser = serials[0]
            if (con.type == ser.type and
                (con.target_type is None or con.target_type == "serial")):
ser.virtmanager_console_dup = con
devs.remove(con)
return devs
############################
# Domain lifecycle methods #
############################
# All these methods are usually run asynchronously from threads, so
# let's be extra careful and have anything which might touch UI
# or GObject.props invoked in an idle callback
def _unregister_reboot_listener(self):
if self.reboot_listener is None:
return
try:
self.idle_add(self.disconnect, self.reboot_listener)
self.reboot_listener = None
except:
pass
def manual_reboot(self):
"""
Attempt a manual reboot by invoking 'shutdown', then listen
for a state change and restart the VM
"""
def reboot_listener(vm, ignore1, ignore2, self):
if vm.is_crashed():
# Abandon reboot plans
self.reboot_listener = None
return True
if not vm.is_shutoff():
# Not shutoff, continue waiting
return
try:
logging.debug("Fake reboot detected shutdown. Restarting VM")
vm.startup()
except:
logging.exception("Fake reboot startup failed")
self.reboot_listener = None
return True
self._unregister_reboot_listener()
# Request a shutdown
self.shutdown()
def add_reboot():
self.reboot_listener = self.connect_opt_out("status-changed",
reboot_listener, self)
self.idle_add(add_reboot)
def shutdown(self):
self._install_abort = True
self._unregister_reboot_listener()
self._backend.shutdown()
self.idle_add(self.force_update_status)
def reboot(self):
self._install_abort = True
self._backend.reboot(0)
self.idle_add(self.force_update_status)
def destroy(self):
self._install_abort = True
self._unregister_reboot_listener()
self._backend.destroy()
self.idle_add(self.force_update_status)
def reset(self):
self._install_abort = True
self._backend.reset(0)
self.idle_add(self.force_update_status)
def startup(self):
if self.get_cloning():
raise RuntimeError(_("Cannot start guest while cloning "
"operation in progress"))
pre_startup_ret = []
self.emit("pre-startup", pre_startup_ret)
for error in pre_startup_ret:
raise RuntimeError(error)
self._backend.create()
self.idle_add(self.force_update_status)
def suspend(self):
self._backend.suspend()
self.idle_add(self.force_update_status)
def delete(self, force=True):
flags = 0
if force:
flags |= getattr(libvirt,
"VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA", 0)
flags |= getattr(libvirt, "VIR_DOMAIN_UNDEFINE_MANAGED_SAVE", 0)
try:
self._backend.undefineFlags(flags)
except libvirt.libvirtError:
logging.exception("libvirt undefineFlags failed, "
"falling back to old style")
self._backend.undefine()
def resume(self):
if self.get_cloning():
raise RuntimeError(_("Cannot resume guest while cloning "
"operation in progress"))
self._backend.resume()
self.idle_add(self.force_update_status)
def hasSavedImage(self):
if not self.managedsave_supported:
return False
try:
return self._backend.hasManagedSaveImage(0)
except libvirt.libvirtError, e:
if not uihelpers.exception_is_libvirt_error(e, "VIR_ERR_NO_DOMAIN"):
raise
return False
def removeSavedImage(self):
if not self.hasSavedImage():
return
self._backend.managedSaveRemove(0)
def save(self, filename=None, meter=None):
self._install_abort = True
if meter:
start_job_progress_thread(self, meter, _("Saving domain to disk"))
if not self.managedsave_supported:
self._backend.save(filename)
else:
self._backend.managedSave(0)
self.idle_add(self.force_update_status)
def support_downtime(self):
return self.conn.check_support(
self.conn.SUPPORT_DOMAIN_MIGRATE_DOWNTIME, self._backend)
def migrate_set_max_downtime(self, max_downtime, flag=0):
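        # libvirt expects the downtime value in milliseconds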
self._backend.migrateSetMaxDowntime(max_downtime, flag)
def migrate(self, destconn, interface=None, rate=0,
live=False, secure=False, meter=None):
self._install_abort = True
newname = None
flags = 0
if self.status() == libvirt.VIR_DOMAIN_RUNNING and live:
flags |= libvirt.VIR_MIGRATE_LIVE
if secure:
flags |= libvirt.VIR_MIGRATE_PEER2PEER
flags |= libvirt.VIR_MIGRATE_TUNNELLED
destconn = destconn.get_backend().libvirtconn
logging.debug("Migrating: conn=%s flags=%s dname=%s uri=%s rate=%s",
destconn, flags, newname, interface, rate)
if meter:
start_job_progress_thread(self, meter, _("Migrating domain"))
self._backend.migrate(destconn, flags, newname, interface, rate)
def define_cb():
newxml = self.get_xml(inactive=True)
destconn.define_domain(newxml)
self.idle_add(define_cb)
# Don't schedule any conn update, migrate dialog handles it for us
###################
# Stats helpers ###
###################
def _sample_mem_stats(self, info):
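        # info is the libvirt domain info tuple: info[2] is the current
        # memory allocation in KiB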
curmem = info[2]
if not self.is_active():
curmem = 0
pcentCurrMem = curmem * 100.0 / self.maximum_memory()
pcentCurrMem = max(0.0, min(pcentCurrMem, 100.0))
return pcentCurrMem, curmem
def _sample_cpu_stats(self, info, now):
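        # info[4] is cumulative guest CPU time in nanoseconds; percentages
        # are computed against the wall-clock delta since the last sample.
        # For example, 0.5s of CPU time over a 1s interval on a 4-CPU host
        # is ~12.5% host CPU and, for a 1-vcpu guest, ~50% guest CPU.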
prevCpuTime = 0
prevTimestamp = 0
cpuTime = 0
cpuTimeAbs = 0
pcentHostCpu = 0
pcentGuestCpu = 0
if len(self.record) > 0:
prevTimestamp = self.record[0]["timestamp"]
prevCpuTime = self.record[0]["cpuTimeAbs"]
        if info[0] not in [libvirt.VIR_DOMAIN_SHUTOFF,
                           libvirt.VIR_DOMAIN_CRASHED]:
guestcpus = info[3]
cpuTime = info[4] - prevCpuTime
cpuTimeAbs = info[4]
hostcpus = self.conn.host_active_processor_count()
pcentbase = (((cpuTime) * 100.0) /
((now - prevTimestamp) * 1000.0 * 1000.0 * 1000.0))
pcentHostCpu = pcentbase / hostcpus
pcentGuestCpu = pcentbase / guestcpus
pcentHostCpu = max(0.0, min(100.0, pcentHostCpu))
pcentGuestCpu = max(0.0, min(100.0, pcentGuestCpu))
return cpuTime, cpuTimeAbs, pcentHostCpu, pcentGuestCpu
def _get_cur_rate(self, what):
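        # Per-second rate of 'what', derived from the two most recent
        # samples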
if len(self.record) > 1:
ret = (float(self.record[0][what] -
self.record[1][what]) /
float(self.record[0]["timestamp"] -
self.record[1]["timestamp"]))
else:
ret = 0.0
        return max(ret, 0.0)  # avoid negative values at poweroff
def _set_max_rate(self, record, what):
if record[what] > self.maxRecord[what]:
self.maxRecord[what] = record[what]
def _get_max_rate(self, name1, name2):
return float(max(self.maxRecord[name1], self.maxRecord[name2]))
def _get_record_helper(self, record_name):
if len(self.record) == 0:
return 0
return self.record[0][record_name]
def _vector_helper(self, record_name):
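        # Build a fixed-length history vector (newest sample first),
        # scaling percentage records down to the 0.0-1.0 range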
vector = []
stats = self.record
for i in range(self.config.get_stats_history_length() + 1):
if i < len(stats):
vector.append(stats[i][record_name] / 100.0)
else:
vector.append(0)
return vector
def _in_out_vector_helper(self, name1, name2, ceil):
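        # Like _vector_helper, but concatenates two rate histories
        # (e.g. rx/tx) normalized against a shared ceiling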
vector = []
stats = self.record
if ceil is None:
ceil = self._get_max_rate(name1, name2)
maxlen = self.config.get_stats_history_length()
for n in [name1, name2]:
for i in range(maxlen + 1):
if i < len(stats):
vector.append(float(stats[i][n]) / ceil)
else:
vector.append(0.0)
return vector
def in_out_vector_limit(self, data, limit):
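        # Trim the concatenated in/out vector to 'limit' points and
        # average the in and out halves into a single series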
        half = len(data) / 2
        end = min(half, limit)
        if half > limit:
            data = data[0:end] + data[half:half + end]
        return [(x + y) / 2 for x, y in zip(data[0:end], data[end:end * 2])]
def toggle_sample_network_traffic(self, ignore=None):
self._enable_net_poll = self.config.get_stats_enable_net_poll()
if self._enable_net_poll and len(self.record) > 1:
rxBytes, txBytes = self._sample_network_traffic()
self.record[0]["netRxKB"] = rxBytes / 1024
self.record[0]["netTxKB"] = txBytes / 1024
def toggle_sample_disk_io(self, ignore=None):
self._enable_disk_poll = self.config.get_stats_enable_disk_poll()
if self._enable_disk_poll and len(self.record) > 1:
rdBytes, wrBytes = self._sample_disk_io()
self.record[0]["diskRdKB"] = rdBytes / 1024
self.record[0]["diskWrKB"] = wrBytes / 1024
###################
# Stats accessors #
###################
def stats_memory(self):
return self._get_record_helper("curmem")
def cpu_time(self):
return self._get_record_helper("cpuTime")
def host_cpu_time_percentage(self):
return self._get_record_helper("cpuHostPercent")
def guest_cpu_time_percentage(self):
return self._get_record_helper("cpuGuestPercent")
def network_rx_rate(self):
return self._get_record_helper("netRxRate")
def network_tx_rate(self):
return self._get_record_helper("netTxRate")
def disk_read_rate(self):
return self._get_record_helper("diskRdRate")
def disk_write_rate(self):
return self._get_record_helper("diskWrRate")
def get_memory_pretty(self):
return util.pretty_mem(self.get_memory())
def maximum_memory_pretty(self):
return util.pretty_mem(self.maximum_memory())
def network_traffic_rate(self):
return self.network_tx_rate() + self.network_rx_rate()
def network_traffic_max_rate(self):
return self._get_max_rate("netRxRate", "netTxRate")
def disk_io_rate(self):
return self.disk_read_rate() + self.disk_write_rate()
def disk_io_max_rate(self):
return self._get_max_rate("diskRdRate", "diskWrRate")
def host_cpu_time_vector(self):
return self._vector_helper("cpuHostPercent")
def guest_cpu_time_vector(self):
return self._vector_helper("cpuGuestPercent")
def stats_memory_vector(self):
return self._vector_helper("currMemPercent")
def network_traffic_vector(self, ceil=None):
return self._in_out_vector_helper("netRxRate", "netTxRate", ceil)
def disk_io_vector(self, ceil=None):
return self._in_out_vector_helper("diskRdRate", "diskWrRate", ceil)
def host_cpu_time_vector_limit(self, limit):
cpudata = self.host_cpu_time_vector()
if len(cpudata) > limit:
cpudata = cpudata[0:limit]
return cpudata
def guest_cpu_time_vector_limit(self, limit):
cpudata = self.guest_cpu_time_vector()
if len(cpudata) > limit:
cpudata = cpudata[0:limit]
return cpudata
def network_traffic_vector_limit(self, limit, ceil=None):
return self.in_out_vector_limit(self.network_traffic_vector(ceil),
limit)
def disk_io_vector_limit(self, limit, ceil=None):
return self.in_out_vector_limit(self.disk_io_vector(ceil), limit)
###################
# Status helpers ##
###################
def _update_start_vcpus(self, ignore, oldstatus, status):
ignore = status
if oldstatus not in [libvirt.VIR_DOMAIN_SHUTDOWN,
libvirt.VIR_DOMAIN_SHUTOFF,
libvirt.VIR_DOMAIN_CRASHED]:
return
# Want to track the startup vcpu amount, which is the
# cap of how many VCPUs can be added
self._startup_vcpus = None
self.vcpu_max_count()
def _normalize_status(self, status):
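        # Treat NOSTATE and BLOCKED as RUNNING for display purposes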
if status == libvirt.VIR_DOMAIN_NOSTATE:
return libvirt.VIR_DOMAIN_RUNNING
elif status == libvirt.VIR_DOMAIN_BLOCKED:
return libvirt.VIR_DOMAIN_RUNNING
return status
def is_active(self):
return not self.is_shutoff()
def is_shutoff(self):
return self.status() == libvirt.VIR_DOMAIN_SHUTOFF
def is_crashed(self):
return self.status() == libvirt.VIR_DOMAIN_CRASHED
def is_stoppable(self):
return self.status() in [libvirt.VIR_DOMAIN_RUNNING,
libvirt.VIR_DOMAIN_PAUSED]
def is_destroyable(self):
return (self.is_stoppable() or
self.status() in [libvirt.VIR_DOMAIN_CRASHED])
def is_runable(self):
return self.status() in [libvirt.VIR_DOMAIN_SHUTOFF,
libvirt.VIR_DOMAIN_CRASHED]
def is_pauseable(self):
return self.status() in [libvirt.VIR_DOMAIN_RUNNING]
def is_unpauseable(self):
return self.status() in [libvirt.VIR_DOMAIN_PAUSED]
def is_paused(self):
return self.status() in [libvirt.VIR_DOMAIN_PAUSED]
def run_status(self):
return self.pretty_run_status(self.status(), self.hasSavedImage())
def run_status_icon_name(self):
status = self.status()
if status not in uihelpers.vm_status_icons:
logging.debug("Unknown status %d, using NOSTATE", status)
status = libvirt.VIR_DOMAIN_NOSTATE
return uihelpers.vm_status_icons[status]
def force_update_status(self):
"""
Fetch current domain state and clear status cache
"""
try:
info = self._backend.info()
self._update_status(info[0])
except libvirt.libvirtError, e:
if uihelpers.exception_is_libvirt_error(e, "VIR_ERR_NO_DOMAIN"):
return
raise
def _update_status(self, status):
"""
Internal helper to change cached status to 'status' and signal
clients if we actually changed state
"""
status = self._normalize_status(status)
if status == self.lastStatus:
return
oldstatus = self.lastStatus
self.lastStatus = status
# Send 'config-changed' before a status-update, so users
# are operating with fresh XML
self.refresh_xml()
self.idle_emit("status-changed", oldstatus, status)
def inspection_data_updated(self):
self.idle_emit("inspection-changed")
##################
# config helpers #
##################
def on_console_scaling_changed(self, *args, **kwargs):
return self.config.listen_pervm(self.uuid, "/scaling",
*args, **kwargs)
def set_console_scaling(self, value):
self.config.set_pervm(self.uuid, "/scaling", value)
def get_console_scaling(self):
ret = self.config.get_pervm(self.uuid, "/scaling")
if ret == -1:
return self.config.get_console_scaling()
return ret
def set_details_window_size(self, w, h):
self.config.set_pervm(self.uuid, "/vm-window-size", (w, h))
def get_details_window_size(self):
ret = self.config.get_pervm(self.uuid, "/vm-window-size")
return ret
def get_console_password(self):
return self.config.get_pervm(self.uuid, "/console-password")
def set_console_password(self, username, keyid):
return self.config.set_pervm(self.uuid, "/console-password",
(username, keyid))
def get_cache_dir(self):
ret = os.path.join(self.conn.get_cache_dir(), self.get_uuid())
if not os.path.exists(ret):
os.makedirs(ret, 0755)
return ret
###################
# Polling helpers #
###################
def _sample_network_traffic(self):
rx = 0
tx = 0
if (not self._stats_net_supported or
not self._enable_net_poll or
not self.is_active()):
return rx, tx
for netdev in self.get_network_devices(refresh_if_nec=False):
dev = netdev.target_dev
if not dev:
continue
if dev in self._stats_net_skip:
continue
try:
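                # interfaceStats returns (rx_bytes, rx_packets, rx_errs,
                # rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop)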
io = self._backend.interfaceStats(dev)
if io:
rx += io[0]
tx += io[4]
except libvirt.libvirtError, err:
if util.is_error_nosupport(err):
logging.debug("Net stats not supported: %s", err)
self._stats_net_supported = False
else:
logging.error("Error reading net stats for "
"'%s' dev '%s': %s",
self.get_name(), dev, err)
if self.is_active():
logging.debug("Adding %s to skip list", dev)
self._stats_net_skip.append(dev)
else:
logging.debug("Aren't running, don't add to skiplist")
return rx, tx
def _sample_disk_io(self):
rd = 0
wr = 0
if (not self._stats_disk_supported or
not self._enable_disk_poll or
not self.is_active()):
return rd, wr
for disk in self.get_disk_devices(refresh_if_nec=False):
dev = disk.target
if not dev:
continue
if dev in self._stats_disk_skip:
continue
try:
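                # blockStats returns (rd_req, rd_bytes, wr_req, wr_bytes,
                # errs)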
io = self._backend.blockStats(dev)
if io:
rd += io[1]
wr += io[3]
except libvirt.libvirtError, err:
if util.is_error_nosupport(err):
logging.debug("Disk stats not supported: %s", err)
self._stats_disk_supported = False
else:
logging.error("Error reading disk stats for "
"'%s' dev '%s': %s",
self.get_name(), dev, err)
if self.is_active():
logging.debug("Adding %s to skip list", dev)
self._stats_disk_skip.append(dev)
else:
logging.debug("Aren't running, don't add to skiplist")
return rd, wr
def tick(self, stats_update=True):
self._invalidate_xml()
info = self._backend.info()
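        # info is (state, maxMemKiB, curMemKiB, nrVirtCpu, cpuTimeNs)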
if stats_update:
self._tick_stats(info)
self._update_status(info[0])
if stats_update:
self.idle_emit("resources-sampled")
def _tick_stats(self, info):
expected = self.config.get_stats_history_length()
current = len(self.record)
if current > expected:
del self.record[expected:current]
        # Xen reports a garbage value (MAX_LONG) for Dom0 max memory,
        # so clamp it to the actual physical RAM in the machine, which
        # is the effective real-world limit
if (self.conn.is_xen() and
self.is_management_domain()):
info[1] = self.conn.host_memory_size()
now = time.time()
(cpuTime, cpuTimeAbs,
pcentHostCpu, pcentGuestCpu) = self._sample_cpu_stats(info, now)
pcentCurrMem, curmem = self._sample_mem_stats(info)
rdBytes, wrBytes = self._sample_disk_io()
rxBytes, txBytes = self._sample_network_traffic()
newStats = {
"timestamp": now,
"cpuTime": cpuTime,
"cpuTimeAbs": cpuTimeAbs,
"cpuHostPercent": pcentHostCpu,
"cpuGuestPercent": pcentGuestCpu,
"curmem": curmem,
"currMemPercent": pcentCurrMem,
"diskRdKB": rdBytes / 1024,
"diskWrKB": wrBytes / 1024,
"netRxKB": rxBytes / 1024,
"netTxKB": txBytes / 1024,
}
for r in ["diskRd", "diskWr", "netRx", "netTx"]:
newStats[r + "Rate"] = self._get_cur_rate(r + "KB")
self._set_max_rate(newStats, r + "Rate")
self.record.insert(0, newStats)
########################
# Libvirt domain class #
########################
class vmmDomainVirtinst(vmmDomain):
"""
Domain object backed by a virtinst Guest object.
Used for launching a details window for customizing a VM before install.
"""
def __init__(self, conn, backend, key):
vmmDomain.__init__(self, conn, backend, key)
self._orig_xml = ""
def get_name(self):
return self._backend.name
def get_id(self):
return -1
def hasSavedImage(self):
return False
def _XMLDesc(self, flags):
raise RuntimeError("Shouldn't be called")
def get_xml(self, *args, **kwargs):
ignore = args
ignore = kwargs
return self._backend.get_install_xml(install=False)
def _refresh_orig_xml(self):
# We need to cache origxml in order to have something to diff against
if not self._orig_xml:
self._orig_xml = self._backend.get_xml_config()
def get_xmlobj(self, inactive=False, refresh_if_nec=True):
self._refresh_orig_xml()
return self._backend
def _reparse_xml(self, *args, **kwargs):
ignore = args
ignore = kwargs
def _define(self, newxml):
ignore = newxml
self._orig_xml = ""
self.emit("config-changed")
def _redefine_xml(self, newxml):
return self._redefine_helper(self._orig_xml, newxml)
def refresh_xml(self, forcesignal=False):
# No caching, so no refresh needed
return
def snapshots_supported(self):
return False
def get_autostart(self):
return self._backend.autostart
def set_autostart(self, val):
self._backend.autostart = bool(val)
self.emit("config-changed")
def define_name(self, newname):
def change(guest):
guest.name = str(newname)
return self._redefine(change)
| gpl-2.0 | 5,290,043,022,076,482,000 | 32.375741 | 80 | 0.56443 | false |