repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
xmnlab/minilab | ia/ocr/OpenANPR/experimental/exp4.py | 1 | 2703 | #!/usr/bin/env python
# encoding: utf-8
"""
exp4.py
Experiment 4: playing around with brightness/contrast enhancement.
Created by Oliver Smith on 2009-08-21.
Copyright (c) 2009 Oliver Smith. All rights reserved.
"""
from getopt import getopt
import sys
from opencv.cv import *
from opencv.highgui import *
def quick_show(image):
"""Display an image on the screen.
Quick 'n' dirty method to throw up a window with an image in it and
wait for the user to dismiss it.
"""
cvNamedWindow("bar")
cvShowImage("bar", image)
cvWaitKey(0)
cvDestroyWindow("bar")
def max_contrast(image):
"""Maximise the contrast on the image using top and bottom hat filters.
"""
size = cvGetSize(image)
bh = cvCreateImage(size, IPL_DEPTH_8U, 1)
th = cvCreateImage(size, IPL_DEPTH_8U, 1)
s1 = cvCreateImage(size, IPL_DEPTH_8U, 1)
s2 = cvCreateImage(size, IPL_DEPTH_8U, 1)
el = cvCreateStructuringElementEx(3, 3, 2, 2, CV_SHAPE_ELLIPSE)
cvMorphologyEx(image, th, None, el, CV_MOP_TOPHAT, 1)
cvMorphologyEx(image, bh, None, el, CV_MOP_BLACKHAT, 1)
cvAdd(image, th, s1)
cvSub(s1, bh, s2)
return s2
def generate_tree(image):
"""Generate the component tree.
Incrementally threshold the image, doing connected component analysis
at each level and building up a tree of related components at each
threshold level.
"""
size = cvGetSize(image)
for level in range(1,255):
# TODO
pass
def prepare_image(filename, invert=False):
"""Prepare an image file for processing.
Loads the file, converts the image to greyscale, enhances the contrast
and (optionally) inverts it.
"""
ret = None
image = cvLoadImage(filename)
if image:
# only query the size once the load is known to have succeeded
size = cvGetSize(image)
# convert to greyscale
grey = cvCreateImage(size, IPL_DEPTH_8U, 1)
cvCvtColor(image, grey, CV_BGR2GRAY)
cvReleaseImage(image)
# maximise contrast
eq_grey = cvCreateImage(size, IPL_DEPTH_8U, 1)
cvEqualizeHist(grey, eq_grey)
cvReleaseImage(grey)
# (optionally) invert
if invert:
ret = cvCreateImage(size, IPL_DEPTH_8U, 1)
cvXorS(eq_grey, cvScalar(255), ret)
cvReleaseImage(eq_grey)
else:
ret = eq_grey
return ret
def syntax():
"""Print the command line syntax."""
msgs = [
"Syntax: exp4.py [-i] imagefile",
"Options:",
" -i Invert the image (select dark objects)"]
print "\n".join(msgs)
def main():
"""Parse the command line and set off processing."""
# parse command line
opts,args = getopt(sys.argv[1:], "i")
if len(args) != 1:
syntax()
return 1
# grab options
invert = False
for n,v in opts:
if n == '-i':
invert = True
# prepare image
image = prepare_image(args[0], invert)
quick_show(image)
quick_show(max_contrast(image))
if __name__ == '__main__':
main()
| gpl-3.0 | 4,285,612,254,806,092,300 | 23.351351 | 72 | 0.695893 | false |
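The `max_contrast` routine in the file above boosts local contrast with the classic morphological trick — add the top-hat response, subtract the black-hat response — via the long-deprecated SWIG-era `opencv` bindings. Below is a minimal sketch of the same idea on the modern `cv2` API; the input filename and the 3×3 elliptical kernel are illustrative assumptions, not values taken from the original script.

```python
# Sketch only: top-hat/black-hat contrast boost as in exp4.py, on the cv2 API.
# "input.png" is an assumed sample file, not part of the original experiment.
import cv2

def max_contrast_cv2(grey):
    """Return grey + tophat - blackhat (saturating 8-bit arithmetic)."""
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    tophat = cv2.morphologyEx(grey, cv2.MORPH_TOPHAT, kernel)
    blackhat = cv2.morphologyEx(grey, cv2.MORPH_BLACKHAT, kernel)
    return cv2.subtract(cv2.add(grey, tophat), blackhat)

if __name__ == "__main__":
    image = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)
    if image is None:
        raise SystemExit("could not read input.png")
    equalized = cv2.equalizeHist(image)          # mirrors cvEqualizeHist above
    cv2.imwrite("contrast.png", max_contrast_cv2(equalized))
```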
alexei38/gshell | src/broadcast.py | 1 | 3310 | # -*- coding: utf-8 -*-
import gtk
class GshellBroadcastDialog(gtk.Dialog):
def __init__(self, main_window, *args):
gtk.Dialog.__init__(self, *args)
self.config = main_window.config
self.main_window = main_window
self.build_dialog()
self.run_window()
def build_dialog(self):
self.set_default_size(400, 500)
self.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK)
self.set_title("Enable Broadcast")
self.store = gtk.ListStore(bool, str, str, int, str)
self.tree = gtk.TreeView(self.store)
renderer = gtk.CellRendererToggle()
renderer.connect("toggled", self.on_cell_toggled)
column = gtk.TreeViewColumn('Broadcast', renderer, active=0)
column.set_sort_column_id(0)
self.tree.append_column(column)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn('Terminal Name', renderer, text=1)
column.set_sort_column_id(1)
self.tree.append_column(column)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn('Host', renderer, text=2)
column.set_sort_column_id(2)
self.tree.append_column(column)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn('Page', renderer, text=3)
column.set_visible(False)
self.tree.append_column(column)
combo_store = gtk.ListStore(str)
for color in ['blue', 'red', 'green', 'yellow']:
combo_store.append([color])
renderer = gtk.CellRendererCombo()
renderer.set_property('editable', True)
renderer.set_property('has-entry', True)
renderer.set_property("text-column", 0)
renderer.set_property("model", combo_store)
renderer.connect("edited", self.on_combo_changed)
column = gtk.TreeViewColumn('Group', renderer, text=4)
self.tree.append_column(column)
scroll = gtk.ScrolledWindow()
scroll.add(self.tree)
self.vbox.pack_start(scroll, True, True, 0)
terminals = self.main_window.notebook.get_all_terminals()
for terminal in terminals:
label = terminal.label.label.get_text().strip()
host = None
if terminal.host:
host = terminal.host['host']
page_num = self.main_window.notebook.get_page_by_terminal(terminal)
self.store.append((terminal.broadcast, label, host, page_num, terminal.group))
self.show_all()
def run_window(self):
response = self.run()
if response == gtk.RESPONSE_OK:
for row in self.store:
broadcast = self.store.get_value(row.iter, 0)
page_num = self.store.get_value(row.iter, 3)
group = self.store.get_value(row.iter, 4)
terminal = self.main_window.notebook.get_terminal_by_page(page_num)
terminal.group = group
terminal.disable_broadcast()
if broadcast:
terminal.enable_broadcast()
self.destroy()
def on_cell_toggled(self, widget, path):
self.store[path][0] = not self.store[path][0]
def on_combo_changed(self, widget, path, text):
self.store[path][4] = text
| gpl-3.0 | -1,497,308,856,532,478,200 | 36.191011 | 94 | 0.602417 | false |
msiemens/tinydb | tests/test_tables.py | 1 | 3353 | import pytest # type: ignore
import re
from tinydb import where
def test_next_id(db):
db.truncate()
assert db._get_next_id() == 1
assert db._get_next_id() == 2
assert db._get_next_id() == 3
def test_tables_list(db):
db.table('table1').insert({'a': 1})
db.table('table2').insert({'a': 1})
assert db.tables() == {'_default', 'table1', 'table2'}
def test_one_table(db):
table1 = db.table('table1')
table1.insert_multiple({'int': 1, 'char': c} for c in 'abc')
assert table1.get(where('int') == 1)['char'] == 'a'
assert table1.get(where('char') == 'b')['char'] == 'b'
def test_multiple_tables(db):
table1 = db.table('table1')
table2 = db.table('table2')
table3 = db.table('table3')
table1.insert({'int': 1, 'char': 'a'})
table2.insert({'int': 1, 'char': 'b'})
table3.insert({'int': 1, 'char': 'c'})
assert table1.count(where('char') == 'a') == 1
assert table2.count(where('char') == 'b') == 1
assert table3.count(where('char') == 'c') == 1
db.drop_tables()
assert len(table1) == 0
assert len(table2) == 0
assert len(table3) == 0
def test_caching(db):
table1 = db.table('table1')
table2 = db.table('table1')
assert table1 is table2
def test_query_cache(db):
query1 = where('int') == 1
assert db.count(query1) == 3
assert query1 in db._query_cache
assert db.count(query1) == 3
assert query1 in db._query_cache
query2 = where('int') == 0
assert db.count(query2) == 0
assert query2 in db._query_cache
assert db.count(query2) == 0
assert query2 in db._query_cache
def test_zero_cache_size(db):
table = db.table('table3', cache_size=0)
query = where('int') == 1
table.insert({'int': 1})
table.insert({'int': 1})
assert table.count(query) == 2
assert table.count(where('int') == 2) == 0
assert len(table._query_cache) == 0
def test_query_cache_size(db):
table = db.table('table3', cache_size=1)
query = where('int') == 1
table.insert({'int': 1})
table.insert({'int': 1})
assert table.count(query) == 2
assert table.count(where('int') == 2) == 0
assert len(table._query_cache) == 1
def test_lru_cache(db):
# Test integration into TinyDB
table = db.table('table3', cache_size=2)
query = where('int') == 1
table.search(query)
table.search(where('int') == 2)
table.search(where('int') == 3)
assert query not in table._query_cache
table.remove(where('int') == 1)
assert not table._query_cache.lru
table.search(query)
assert len(table._query_cache) == 1
table.clear_cache()
assert len(table._query_cache) == 0
def test_table_is_iterable(db):
table = db.table('table1')
table.insert_multiple({'int': i} for i in range(3))
assert [r for r in table] == table.all()
def test_table_name(db):
name = 'table3'
table = db.table(name)
assert name == table.name
with pytest.raises(AttributeError):
table.name = 'foo'
def test_table_repr(db):
name = 'table4'
table = db.table(name)
assert re.match(
r"<Table name=\'table4\', total=0, "
r"storage=<tinydb\.storages\.MemoryStorage object at [a-zA-Z0-9]+>>",
repr(table))
def test_truncate_table(db):
db.truncate()
assert db._get_next_id() == 1
| mit | -5,822,757,506,467,983,000 | 21.503356 | 77 | 0.590516 | false |
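The tests above exercise TinyDB's named tables, the per-table LRU query cache and `drop_tables`. Here is a self-contained session showing those behaviours with the in-memory storage, so nothing touches disk; the table name and documents are made up for the example.

```python
# Minimal TinyDB session mirroring the behaviours tested above (example data only).
from tinydb import TinyDB, where
from tinydb.storages import MemoryStorage

db = TinyDB(storage=MemoryStorage)
table = db.table('table1', cache_size=2)          # bounded LRU query cache

table.insert_multiple({'int': 1, 'char': c} for c in 'abc')
assert table.get(where('char') == 'b')['char'] == 'b'
assert table.count(where('int') == 1) == 3        # result now sits in the query cache

print(db.tables())                                # {'table1'} - only tables written to
db.drop_tables()                                  # empties every table
assert len(table) == 0
```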
dfm/ketu | ketu/feature_extract.py | 1 | 3169 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["FeatureExtract"]
import os
import h5py
import numpy as np
from .pipeline import Pipeline
def check_orbits(p1, t1, p2, t2, tmn, tmx, tol):
n1 = t1 + p1 * np.arange(np.floor((tmn-t1)/p1), np.ceil((tmx-t1)/p1))
n1 = n1[(tmn <= n1) * (n1 <= tmx)]
n2 = t2 + p2 * np.arange(np.floor((tmn-t2)/p2), np.ceil((tmx-t2)/p2))
n2 = n2[(tmn <= n2) * (n2 <= tmx)]
delta = np.fabs(n1[:, None] - n2[None, :])
return max(len(n1), len(n2)) == np.sum(delta < tol)
class FeatureExtract(Pipeline):
cache_ext = ".h5"
query_parameters = dict()
def get_result(self, query, parent_response):
# Build a data type with the peak data.
peaks = parent_response.peaks
dtype = [(k, float) for k in sorted(peaks[0].keys())]
# Add in the meta data columns.
dtype += [
("meta_starid", int), ("meta_hasinj", bool), ("meta_isrec", bool),
("meta_inverted", bool),
("inj_period", float), ("inj_t0", float), ("inj_radius", float),
("inj_b", float), ("inj_e", float), ("inj_pomega", float),
]
injections = query.get("injections", [])
inj_cols = ["period", "t0", "radius", "b", "e", "pomega"]
# Find the minimum and maximum observed times.
lcs = parent_response.model_light_curves
tmn = np.min([np.min(lc.time) for lc in lcs])
tmx = np.max([np.max(lc.time) for lc in lcs])
# Loop over the peaks and insert the data.
features = np.empty(len(peaks), dtype=dtype)
for i, peak in enumerate(peaks):
features[i]["meta_starid"] = parent_response.starid
features[i]["meta_inverted"] = query.get("invert", False)
# Peak data.
for k, v in peak.items():
features[i][k] = v
# Injections.
for k in inj_cols:
features[i]["inj_" + k] = np.nan
if len(injections):
features[i]["meta_hasinj"] = True
isinj = False
for inj in injections:
if check_orbits(
peak["period"], peak["t0"],
inj["period"], inj["t0"],
tmn, tmx, 0.5 * peak["duration"]):
isinj = True
for k in inj_cols:
features[i]["inj_" + k] = inj[k]
break
features[i]["meta_isrec"] = isinj
else:
features[i]["meta_hasinj"] = False
features[i]["meta_isrec"] = False
return dict(features=features)
def save_to_cache(self, fn, response):
try:
os.makedirs(os.path.dirname(fn))
except os.error:
pass
with h5py.File(fn, "w") as f:
f.create_dataset("features", data=response["features"])
def load_from_cache(self, fn):
if os.path.exists(fn):
with h5py.File(fn, "r") as f:
return dict(features=f["features"][...])
return None
| mit | -7,606,553,224,304,413,000 | 33.075269 | 78 | 0.497949 | false |
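`check_orbits` above decides whether two transit ephemerides (period, reference transit time) predict the same transit times, to within `tol`, over the observing window `[tmn, tmx]`. A standalone rerun of that logic with invented numbers makes the pass/fail behaviour concrete.

```python
# Standalone illustration of the ephemeris match used in check_orbits above.
# Periods, epochs and the 0-100 day window are invented for the example.
import numpy as np

def check_orbits(p1, t1, p2, t2, tmn, tmx, tol):
    n1 = t1 + p1 * np.arange(np.floor((tmn - t1) / p1), np.ceil((tmx - t1) / p1))
    n1 = n1[(tmn <= n1) * (n1 <= tmx)]
    n2 = t2 + p2 * np.arange(np.floor((tmn - t2) / p2), np.ceil((tmx - t2) / p2))
    n2 = n2[(tmn <= n2) * (n2 <= tmx)]
    delta = np.fabs(n1[:, None] - n2[None, :])
    return max(len(n1), len(n2)) == np.sum(delta < tol)

# Same period, epoch shifted by far less than the tolerance -> every transit pairs up.
print(check_orbits(10.0, 2.0, 10.0, 2.1, 0.0, 100.0, 0.5))   # True
# Half the period -> twice as many predicted transits, most unmatched -> no match.
print(check_orbits(10.0, 2.0, 5.0, 2.0, 0.0, 100.0, 0.5))    # False
```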
hgsoft/hgsoft-addons | custom_survey_multi_emails_and_portal/__manifest__.py | 1 | 1869 | # -*- coding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2018 HGSOFT - www.hgsoft.com.br #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
{
'name': "Custom Survey Multi Emails and Portal",
'summary': """Custom Survey Multi Emails and Portal""",
'author': "HGSoft - Soluções Criativas e Inteligentes",
'website': "http://www.hgsoft.com.br/",
'category': 'Marketing',
'version': '11.0.0',
'depends': ['base', 'survey', 'portal'],
'data': [
'views/custom_partner.xml',
'views/custom_user.xml',
'views/surveys_portal_template.xml'
],
} | gpl-3.0 | 9,192,320,446,770,729,000 | 43.47619 | 79 | 0.429031 | false |
coxmediagroup/django-threadedcomments | threadedcomments/migrations/0001_initial.py | 1 | 7099 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ThreadedComment'
db.create_table('threadedcomments_comment', (
('comment_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['comments.Comment'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.TextField')(blank=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='children', null=True, blank=True, to=orm['threadedcomments.ThreadedComment'])),
('last_child', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['threadedcomments.ThreadedComment'], null=True, blank=True)),
('tree_path', self.gf('django.db.models.fields.TextField')(db_index=True)),
))
db.send_create_signal('threadedcomments', ['ThreadedComment'])
def backwards(self, orm):
# Deleting model 'ThreadedComment'
db.delete_table('threadedcomments_comment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'threadedcomments.threadedcomment': {
'Meta': {'object_name': 'ThreadedComment', 'db_table': "'threadedcomments_comment'", '_ormbases': ['comments.Comment']},
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'last_child': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['threadedcomments.ThreadedComment']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'null': 'True', 'blank': 'True', 'to': "orm['threadedcomments.ThreadedComment']"}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tree_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
}
}
complete_apps = ['threadedcomments']
| bsd-3-clause | -6,179,495,202,345,432,000 | 72.185567 | 206 | 0.568953 | false |
0xffea/keystone | keystone/token/backends/memcache.py | 1 | 4868 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import copy
import memcache
from keystone.common import utils
from keystone import config
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone import token
CONF = config.CONF
config.register_str('servers', group='memcache', default='localhost:11211')
class Token(token.Driver):
revocation_key = 'revocation-list'
def __init__(self, client=None):
self._memcache_client = client
@property
def client(self):
return self._memcache_client or self._get_memcache_client()
def _get_memcache_client(self):
memcache_servers = CONF.memcache.servers.split(',')
self._memcache_client = memcache.Client(memcache_servers, debug=0)
return self._memcache_client
def _prefix_token_id(self, token_id):
return 'token-%s' % token_id.encode('utf-8')
def _prefix_user_id(self, user_id):
return 'usertokens-%s' % user_id.encode('utf-8')
def get_token(self, token_id):
if token_id is None:
raise exception.TokenNotFound(token_id='')
ptk = self._prefix_token_id(token_id)
token = self.client.get(ptk)
if token is None:
raise exception.TokenNotFound(token_id=token_id)
return token
def create_token(self, token_id, data):
data_copy = copy.deepcopy(data)
ptk = self._prefix_token_id(token.unique_id(token_id))
if not data_copy.get('expires'):
data_copy['expires'] = token.default_expire_time()
kwargs = {}
if data_copy['expires'] is not None:
expires_ts = utils.unixtime(data_copy['expires'])
kwargs['time'] = expires_ts
self.client.set(ptk, data_copy, **kwargs)
if 'id' in data['user']:
token_data = jsonutils.dumps(token_id)
user_id = data['user']['id']
user_key = self._prefix_user_id(user_id)
if not self.client.append(user_key, ',%s' % token_data):
if not self.client.add(user_key, token_data):
if not self.client.append(user_key, ',%s' % token_data):
msg = _('Unable to add token user list.')
raise exception.UnexpectedError(msg)
return copy.deepcopy(data_copy)
def _add_to_revocation_list(self, data):
data_json = jsonutils.dumps(data)
if not self.client.append(self.revocation_key, ',%s' % data_json):
if not self.client.add(self.revocation_key, data_json):
if not self.client.append(self.revocation_key,
',%s' % data_json):
msg = _('Unable to add token to revocation list.')
raise exception.UnexpectedError(msg)
def delete_token(self, token_id):
# Test for existence
data = self.get_token(token.unique_id(token_id))
ptk = self._prefix_token_id(token.unique_id(token_id))
result = self.client.delete(ptk)
self._add_to_revocation_list(data)
return result
def list_tokens(self, user_id, tenant_id=None, trust_id=None):
tokens = []
user_key = self._prefix_user_id(user_id)
user_record = self.client.get(user_key) or ""
token_list = jsonutils.loads('[%s]' % user_record)
for token_id in token_list:
ptk = self._prefix_token_id(token_id)
token_ref = self.client.get(ptk)
if token_ref:
if tenant_id is not None:
tenant = token_ref.get('tenant')
if not tenant:
continue
if tenant.get('id') != tenant_id:
continue
if trust_id is not None:
trust = token_ref.get('trust_id')
if not trust:
continue
if trust != trust_id:
continue
tokens.append(token_id)
return tokens
def list_revoked_tokens(self):
list_json = self.client.get(self.revocation_key)
if list_json:
return jsonutils.loads('[%s]' % list_json)
return []
| apache-2.0 | 8,416,145,999,367,778,000 | 36.160305 | 76 | 0.591413 | false |
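`create_token` and `_add_to_revocation_list` above lean on one memcache idiom: try to `append` to an existing key, fall back to `add` if the key is missing, then retry the `append` to cover the race where another writer created the key in between. The sketch below isolates that idiom; it assumes python-memcached and a reachable memcached on localhost:11211, and the key and values are placeholders.

```python
# Sketch of the append-or-add-then-append idiom used by the token driver above.
# Assumes a memcached instance at localhost:11211; key and values are examples.
import json
import memcache

client = memcache.Client(['localhost:11211'], debug=0)

def append_to_list_key(key, value):
    """Append ',<json>' to key, creating the key if it does not exist yet."""
    payload = json.dumps(value)
    if client.append(key, ',%s' % payload):
        return                                   # common case: key already existed
    if client.add(key, payload):
        return                                   # we created the key with the first element
    if not client.append(key, ',%s' % payload):  # lost a race: someone added it first
        raise RuntimeError('unable to update %r' % key)

append_to_list_key('usertokens-demo', 'token-1')
append_to_list_key('usertokens-demo', 'token-2')
raw = client.get('usertokens-demo') or ''
print(json.loads('[%s]' % raw))                  # ['token-1', 'token-2']
```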
gypleon/DeepLearningProject | deprecated_codes/evaluate.py | 1 | 5339 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
import model
from data_reader import load_data, DataReader
flags = tf.flags
# data
flags.DEFINE_string('data_dir', 'data', 'data directory. Should contain train.txt/valid.txt/test.txt with input data')
flags.DEFINE_string('train_dir', 'cv', 'training directory (models and summaries are saved there periodically)')
flags.DEFINE_string('load_model', 'cv', '(optional) filename of the model to load. Useful for re-starting training from a checkpoint')
# model params
flags.DEFINE_integer('rnn_size', 650, 'size of LSTM internal state')
flags.DEFINE_integer('highway_layers', 2, 'number of highway layers')
flags.DEFINE_integer('char_embed_size', 15, 'dimensionality of character embeddings')
flags.DEFINE_string ('kernels', '[1,2,3,4,5,6,7]', 'CNN kernel widths')
flags.DEFINE_string ('kernel_features', '[50,100,150,200,200,200,200]', 'number of features in the CNN kernel')
flags.DEFINE_integer('rnn_layers', 2, 'number of layers in the LSTM')
flags.DEFINE_float ('dropout', 0.5, 'dropout. 0 = no dropout')
# optimization
flags.DEFINE_integer('num_unroll_steps', 35, 'number of timesteps to unroll for')
flags.DEFINE_integer('batch_size', 20, 'number of sequences to train on in parallel')
flags.DEFINE_integer('max_word_length', 65, 'maximum word length')
# bookkeeping
flags.DEFINE_integer('seed', 3435, 'random number generator seed')
flags.DEFINE_string ('EOS', '+', '<EOS> symbol. should be a single unused character (like +) for PTB and blank for others')
FLAGS = flags.FLAGS
def run_test(session, m, data, batch_size, num_steps):
"""Runs the model on the given data."""
costs = 0.0
iters = 0
state = session.run(m.initial_state)
for step, (x, y) in enumerate(reader.dataset_iterator(data, batch_size, num_steps)):
cost, state = session.run([m.cost, m.final_state], {
m.input_data: x,
m.targets: y,
m.initial_state: state
})
costs += cost
iters += 1
return costs / iters
def main(_):
''' Loads trained model and evaluates it on test split '''
if FLAGS.load_model is None:
print('Please specify checkpoint file to load model from')
return -1
if not os.path.exists(FLAGS.load_model):
print('Checkpoint file not found', FLAGS.load_model)
return -1
word_vocab, char_vocab, word_tensors, char_tensors, max_word_length = \
load_data(FLAGS.data_dir, FLAGS.max_word_length, eos=FLAGS.EOS)
test_reader = DataReader(word_tensors['test'], char_tensors['test'],
FLAGS.batch_size, FLAGS.num_unroll_steps)
print('initialized test dataset reader')
with tf.Graph().as_default(), tf.Session() as session:
# tensorflow seed must be inside graph
tf.set_random_seed(FLAGS.seed)
np.random.seed(seed=FLAGS.seed)
''' build inference graph '''
with tf.variable_scope("Model"):
m = model.inference_graph(
char_vocab_size=char_vocab.size,
word_vocab_size=word_vocab.size,
char_embed_size=FLAGS.char_embed_size,
batch_size=FLAGS.batch_size,
num_highway_layers=FLAGS.highway_layers,
num_rnn_layers=FLAGS.rnn_layers,
rnn_size=FLAGS.rnn_size,
max_word_length=max_word_length,
kernels=eval(FLAGS.kernels),
kernel_features=eval(FLAGS.kernel_features),
num_unroll_steps=FLAGS.num_unroll_steps,
dropout=0)
m.update(model.loss_graph(m.logits, FLAGS.batch_size, FLAGS.num_unroll_steps))
global_step = tf.Variable(0, dtype=tf.int32, name='global_step')
saver = tf.train.Saver()
# LEON: import latest checkpoint
latest_ckpt = tf.train.latest_checkpoint(FLAGS.load_model)
# saver.restore(session, FLAGS.load_model)
saver.restore(session, latest_ckpt)
print('Loaded model from', FLAGS.load_model, 'saved at global step', global_step.eval())
''' training starts here '''
rnn_state = session.run(m.initial_rnn_state)
count = 0
avg_loss = 0
start_time = time.time()
for x, y in test_reader.iter():
count += 1
loss, rnn_state = session.run([
m.loss,
m.final_rnn_state
], {
m.input : x,
m.targets: y,
m.initial_rnn_state: rnn_state
})
avg_loss += loss
avg_loss /= count
time_elapsed = time.time() - start_time
print("test loss = %6.8f, perplexity = %6.8f" % (avg_loss, np.exp(avg_loss)))
print("test samples:", count*FLAGS.batch_size, "time elapsed:", time_elapsed, "time per one batch:", time_elapsed/count)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 | -3,196,438,457,534,124,500 | 37.410072 | 139 | 0.588687 | false |
briancurtin/python-openstacksdk | openstack/identity/v3/user.py | 1 | 3008 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.identity import identity_service
from openstack import resource2 as resource
class User(resource.Resource):
resource_key = 'user'
resources_key = 'users'
base_path = '/users'
service = identity_service.IdentityService()
# capabilities
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
patch_update = True
# Properties
#: References the user's default project ID against which to authorize,
#: if the API user does not explicitly specify one when creating a token.
#: Setting this attribute does not grant any actual authorization on the
#: project, and is merely provided for the user's convenience.
#: Therefore, the referenced project does not need to exist within the
#: user's domain.
#:
#: *New in version 3.1* If the user does not have authorization to
#: their default project, the default project will be ignored at token
#: creation. *Type: string*
default_project_id = resource.Body('default_project_id')
#: The description of this user. *Type: string*
description = resource.Body('description')
#: References the domain ID which owns the user; if a domain ID is not
#: specified by the client, the Identity service implementation will
#: default it to the domain ID to which the client's token is scoped.
#: *Type: string*
domain_id = resource.Body('domain_id')
#: The email of this user. *Type: string*
email = resource.Body('email')
#: Setting this value to ``False`` prevents the user from authenticating or
#: receiving authorization. Additionally, all pre-existing tokens held by
#: the user are immediately invalidated. Re-enabling a user does not
#: re-enable pre-existing tokens. *Type: bool*
is_enabled = resource.Body('enabled', type=bool)
#: The links for the user resource.
links = resource.Body('links')
#: Unique user name, within the owning domain. *Type: string*
name = resource.Body('name')
#: The default form of credential used during authentication.
#: *Type: string*
password = resource.Body('password')
#: The date and time when the pasword expires. The time zone is UTC.
#: A None value means the password never expires.
#: This is a response object attribute, not valid for requests.
#: *New in version 3.7*
password_expires_at = resource.Body('password_expires_at')
| apache-2.0 | 3,061,282,737,069,946,000 | 43.235294 | 79 | 0.705452 | false |
zuo/unittest_expander | setup.py | 1 | 1683 | import re
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
VERSION_REGEX = re.compile(b'''
^
release
\s*
=
\s*
['"]
(?P<version>
[^'"]+
)
['"]
\s*
$
''', re.VERBOSE)
def get_version():
with open('docs/conf.py', 'rb') as f:
for line in f:
match = VERSION_REGEX.search(line)
if match:
return match.group('version').decode('utf-8')
raise AssertionError('version not specified')
def get_long_description():
with open('README.rst', 'rb') as f:
return f.read().decode('utf-8')
setup(
name='unittest_expander',
version=get_version(),
py_modules=['unittest_expander'],
author='Jan Kaliszewski (zuo)',
author_email='[email protected]',
description='Easy and flexible unittest parameterization.',
long_description=get_long_description(),
keywords='unittest testing parameterization parametrization',
url='https://github.com/zuo/unittest_expander',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Testing',
],
)
| mit | 1,311,275,602,204,975,000 | 24.892308 | 65 | 0.588235 | false |
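The package being built above is `unittest_expander` ("easy and flexible unittest parameterization"). As a rough illustration of what it packages, the sketch below follows the `expand`/`foreach`/`param` usage as remembered from the project's documentation; treat it as a non-authoritative sketch, and the tested function and parameters are invented.

```python
# Sketch of unittest_expander usage from memory of its documented API
# (expand/foreach/param); the tested function and parameters are invented.
import unittest
from unittest_expander import expand, foreach, param

def is_even(n):
    return n % 2 == 0

@expand
class TestIsEven(unittest.TestCase):

    @foreach(
        param(0, expected=True),
        param(2, expected=True),
        param(17, expected=False),
    )
    def test_is_even(self, n, expected):
        self.assertEqual(is_even(n), expected)

if __name__ == '__main__':
    unittest.main()
```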
rbuffat/pyepw | tests/test_ground_temperatures.py | 1 | 7055 | import os
import tempfile
import unittest
from pyepw.epw import GroundTemperatures, GroundTemperature, EPW
class TestGroundTemperatures(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_ground_temperatures(self):
obj = GroundTemperatures()
ground_temperature_obj = GroundTemperature()
var_ground_temperature_ground_temperature_depth = 1.1
ground_temperature_obj.ground_temperature_depth = var_ground_temperature_ground_temperature_depth
var_ground_temperature_depth_soil_conductivity = 2.2
ground_temperature_obj.depth_soil_conductivity = var_ground_temperature_depth_soil_conductivity
var_ground_temperature_depth_soil_density = 3.3
ground_temperature_obj.depth_soil_density = var_ground_temperature_depth_soil_density
var_ground_temperature_depth_soil_specific_heat = 4.4
ground_temperature_obj.depth_soil_specific_heat = var_ground_temperature_depth_soil_specific_heat
var_ground_temperature_depth_january_average_ground_temperature = 5.5
ground_temperature_obj.depth_january_average_ground_temperature = var_ground_temperature_depth_january_average_ground_temperature
var_ground_temperature_depth_february_average_ground_temperature = 6.6
ground_temperature_obj.depth_february_average_ground_temperature = var_ground_temperature_depth_february_average_ground_temperature
var_ground_temperature_depth_march_average_ground_temperature = 7.7
ground_temperature_obj.depth_march_average_ground_temperature = var_ground_temperature_depth_march_average_ground_temperature
var_ground_temperature_depth_april_average_ground_temperature = 8.8
ground_temperature_obj.depth_april_average_ground_temperature = var_ground_temperature_depth_april_average_ground_temperature
var_ground_temperature_depth_may_average_ground_temperature = 9.9
ground_temperature_obj.depth_may_average_ground_temperature = var_ground_temperature_depth_may_average_ground_temperature
var_ground_temperature_depth_june_average_ground_temperature = 10.10
ground_temperature_obj.depth_june_average_ground_temperature = var_ground_temperature_depth_june_average_ground_temperature
var_ground_temperature_depth_july_average_ground_temperature = 11.11
ground_temperature_obj.depth_july_average_ground_temperature = var_ground_temperature_depth_july_average_ground_temperature
var_ground_temperature_depth_august_average_ground_temperature = 12.12
ground_temperature_obj.depth_august_average_ground_temperature = var_ground_temperature_depth_august_average_ground_temperature
var_ground_temperature_depth_september_average_ground_temperature = 13.13
ground_temperature_obj.depth_september_average_ground_temperature = var_ground_temperature_depth_september_average_ground_temperature
var_ground_temperature_depth_october_average_ground_temperature = 14.14
ground_temperature_obj.depth_october_average_ground_temperature = var_ground_temperature_depth_october_average_ground_temperature
var_ground_temperature_depth_november_average_ground_temperature = 15.15
ground_temperature_obj.depth_november_average_ground_temperature = var_ground_temperature_depth_november_average_ground_temperature
var_ground_temperature_depth_december_average_ground_temperature = 16.16
ground_temperature_obj.depth_december_average_ground_temperature = var_ground_temperature_depth_december_average_ground_temperature
obj.add_ground_temperature(ground_temperature_obj)
epw = EPW(ground_temperatures=obj)
epw.save(self.path, check=False)
epw2 = EPW()
epw2.read(self.path)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].ground_temperature_depth,
var_ground_temperature_ground_temperature_depth)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_soil_conductivity,
var_ground_temperature_depth_soil_conductivity)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_soil_density,
var_ground_temperature_depth_soil_density)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_soil_specific_heat,
var_ground_temperature_depth_soil_specific_heat)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_january_average_ground_temperature,
var_ground_temperature_depth_january_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_february_average_ground_temperature,
var_ground_temperature_depth_february_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_march_average_ground_temperature,
var_ground_temperature_depth_march_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_april_average_ground_temperature,
var_ground_temperature_depth_april_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_may_average_ground_temperature,
var_ground_temperature_depth_may_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_june_average_ground_temperature,
var_ground_temperature_depth_june_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_july_average_ground_temperature,
var_ground_temperature_depth_july_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_august_average_ground_temperature,
var_ground_temperature_depth_august_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_september_average_ground_temperature,
var_ground_temperature_depth_september_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_october_average_ground_temperature,
var_ground_temperature_depth_october_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_november_average_ground_temperature,
var_ground_temperature_depth_november_average_ground_temperature)
self.assertAlmostEqual(
epw2.ground_temperatures.ground_temperatures[0].depth_december_average_ground_temperature,
var_ground_temperature_depth_december_average_ground_temperature)
| apache-2.0 | 2,445,684,638,360,404,500 | 66.190476 | 141 | 0.743586 | false |
a-detiste/game-data-packager | tools/check_gog.py | 1 | 3135 | #!/usr/bin/python3
# encoding=utf-8
#
# Copyright © 2017 Alexandre Detiste <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You can find the GPL license text on a Debian system under
# /usr/share/common-licenses/GPL-2.
# GDP_UNINSTALLED=1 python3 -m tools.check_gog
import json
import os
import subprocess
from distutils.version import LooseVersion
from game_data_packager.game import load_games
from game_data_packager.gog import GOG
yaml_files = {}
owned_files = []
owned_games = GOG.owned_games()
def decode_filename(archive):
game = archive[len('setup_')::]
game = game.rsplit('_', 1)[0].strip()
version = archive.split('_')[-1][:-4:]
return(archive, game, version)
print('# Reading data from YAML...')
for name, game in load_games(datadir='out').items():
game.load_file_data()
has_tags = has_archive = is_removed = False
for package in game.packages.values():
gog = package.gog or game.gog
if 'removed' in gog:
is_removed = True
elif 'url' in gog:
has_tags = True
for filename,f in game.files.items():
if filename.startswith('setup_') and filename.endswith('.exe'):
has_archive = True
_, supported, version = decode_filename(filename)
if '.' not in version:
# ancient GOG packages
continue
if LooseVersion(version) > LooseVersion(yaml_files.get(supported, '0')):
yaml_files[supported] = version
if has_tags != has_archive and not is_removed:
owned = game.gog_download_name(package) in owned_games
print('GOG metadata not in sync for %s. (has_tags: %s, has_archive: %s, owned: %s)'
% (name, has_tags, has_archive, owned))
print('# Reading data from LGOGDOWNLOADER...')
cache = os.path.expanduser('~/.cache/lgogdownloader/gamedetails.json')
if not os.path.isfile(cache):
print("Couldn't locate any game, running 'lgogdownloader --login'")
subprocess.call(['lgogdownloader', '--login'])
print("... and now 'lgogdownloader --update-cache'")
subprocess.call(['lgogdownloader', '--update-cache'])
data = json.load(open(cache, encoding='utf-8'))
for game in data['games']:
for installer in game['installers']:
filename = installer['path']
filename = os.path.basename(filename)
if filename.startswith('setup_') and filename.endswith('.exe'):
owned_files.append(decode_filename(filename))
print('# Left join...')
for supported, curr_ver in yaml_files.items():
for _, to_match, last_ver in owned_files:
if supported == to_match and last_ver > curr_ver:
print("%-40s %12s -> %-12s" % (supported, curr_ver, last_ver))
| gpl-2.0 | 8,912,646,860,192,819,000 | 37.219512 | 91 | 0.658264 | false |
0xa/toomuchwin | toomuchwin/models/tx.py | 1 | 4316 | from datetime import datetime
from sqlalchemy import (
Column,
BigInteger, DateTime,
ForeignKey, CheckConstraint
)
from sqlalchemy.orm import relationship
from sqlalchemy.sql import select
from sqlalchemy.ext.hybrid import hybrid_property
from .enum import DeclEnum
from .base import Base
class Tx(Base):
""" A transaction between an user and an in-game user.
amount: if > 0, in-game -> User.balance. (=deposit)
if < 0, User.balance -> in-game. (=withdrawal)
If it has no events, the default status is pending if it is a withdrawal
or a contract, closed if it is a deposit.
note for create_date: For deposits only, it is imported as-is from EVE API,
and will be in *EVE time* unlike every other datetime field, or even
withdrawals and contracts.
"""
__tablename__ = 'txs'
__table_args__ = (
CheckConstraint('(amount is null) != (prize_id is null)'),
)
id = Column(BigInteger, primary_key=True)
user_id = Column(ForeignKey('users.id'), nullable=False)
create_date = Column(DateTime, nullable=False, default=datetime.now)
amount = Column(BigInteger, nullable=True)
prize_id = Column(ForeignKey('prizes.id'), nullable=True)
events = relationship('TxEvent', backref='tx', lazy='joined',
order_by='TxEvent.id.desc()')
status_events = relationship('TxEvent', order_by='TxEvent.id',
primaryjoin='and_(TxEvent.tx_id == Tx.id, TxEvent.type == "status")',
lazy='joined')
assign_events = relationship('TxEvent', order_by='TxEvent.id',
primaryjoin='and_(TxEvent.tx_id == Tx.id, TxEvent.type == "assign")',
lazy='joined')
@hybrid_property
def assigned_to(self):
""" Returns a User if it is assigned, None if it is not. """
if self.assign_events:
return self.assign_events[-1].assigned_to
else:
return None
@assigned_to.expression
def assigned_to(cls):
return select([TxEvent.assigned_to_id]) \
.where(TxEvent.tx_id == cls.id).where(TxEvent.type == 'assign') \
.order_by(TxEvent.id.desc()).limit(1).label('assigned_to')
@hybrid_property
def status(self):
if self.status_events:
return self.status_events[-1].status
else:
return None
@status.expression
def status(cls):
return select([TxEvent.status]) \
.where(TxEvent.tx_id == cls.id).where(TxEvent.type == 'status') \
.order_by(TxEvent.id.desc()).limit(1).label('status')
def __repr__(self):
return super().__repr__(user_id=self.user_id, prize_id=self.prize_id,
amount=self.amount)
class TxEventType(DeclEnum):
status = 'status', 'Status change'
assign = 'assign', 'Assignation'
class TxEventStatus(DeclEnum):
contracted = 'contracted', 'Contracted'
closed = 'closed', 'Closed'
other = 'other', 'Other'
class TxEvent(Base):
""" Tx Event
user: The User who made the action.
assigned_to: The User assigned to the Tx.
The User currently assigned to a Tx is the last assigned_to.
status: Tx status change. Can be one of:
- contracted: The contract has been made, to be accepted.
- closed: Contract accepted / ISK sent.
- other: Anything else/unexpected.
"""
__tablename__ = 'tx_events'
id = Column(BigInteger, primary_key=True)
tx_id = Column(ForeignKey('txs.id'), nullable=False, index=True)
user_id = Column(ForeignKey('users.id'), nullable=False)
create_date = Column(DateTime, nullable=False, default=datetime.now)
type = Column(TxEventType.db_type(), nullable=False)
assigned_to_id = Column(ForeignKey('users.id'), nullable=True)
status = Column(TxEventStatus.db_type(), nullable=True)
def __repr__(self):
if self.type == TxEventType.assign:
return super().__repr__(tx_id=self.tx_id, user_id=self.user_id,
assigned_to_id=self.assigned_to_id)
if self.type == TxEventType.assign:
return super().__repr__(tx_id=self.tx_id, user_id=self.user_id,
status=self.status)
return super().__repr__(tx_id=self.tx_id, user_id=self.user_id)
| mit | -6,688,397,405,071,409,000 | 35.268908 | 79 | 0.620714 | false |
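`Tx.status` and `Tx.assigned_to` above are SQLAlchemy hybrid properties: on an instance they walk the loaded event objects, while at class level they render as a correlated subquery usable in filters. Below is a stripped-down, self-contained model showing that dual behaviour against an in-memory SQLite database; the toy `accounts` table is invented and is not this project's schema.

```python
# Minimal hybrid_property demo: the same attribute works in Python and in SQL.
# Assumes only SQLAlchemy 1.4+; the "accounts" model is a toy, not this schema.
from sqlalchemy import Column, Integer, create_engine, select
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Account(Base):
    __tablename__ = 'accounts'
    id = Column(Integer, primary_key=True)
    deposits = Column(Integer, nullable=False)
    withdrawals = Column(Integer, nullable=False)

    @hybrid_property
    def balance(self):
        # On an instance this is plain Python; on the class the same expression
        # becomes column arithmetic, so no separate @balance.expression is needed.
        return self.deposits - self.withdrawals

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Account(deposits=100, withdrawals=30),
                     Account(deposits=10, withdrawals=50)])
    session.commit()
    positive = session.scalars(select(Account).where(Account.balance > 0)).all()
    print([(a.id, a.balance) for a in positive])   # [(1, 70)]
```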
stormaaja/csvconverter | update_wrapper.py | 1 | 2774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib2 import Request, urlopen, HTTPError, URLError
from csv_converter import CsvConverter
from stock_updater import StockUpdater, ProductNotFoundError, MultipleProductsFoundError
import sqlite3
import mysql.connector
import os
import json
import logging
import database_helper
class UpdateWrapper:
def read_config(self, file):
with open(file) as f:
self.config = json.load(f)
def run(self):
request = Request(self.config["source"]["url"])
try:
response = urlopen(request)
except HTTPError as e:
logging.error("The server returned error %d", e.code)
raise e
except URLError as e:
logging.error("Failed to reach server: %s", e.reason)
raise e
converter = CsvConverter("")
converter.setSourceColumns(
self.config["source"]["product_code_column"],
self.config["source"]["quantity_column"])
converter.read_csv(response)
logging.info("%s products to update", len(converter.rows))
if self.config["database_connection"]["type"] == "sqlite3":
conn = sqlite3.connect(self.config["database_connection"]["database"])
elif self.config["database_connection"]["type"] == "mysql":
conn = mysql.connector.connect(
user=self.config["database_connection"]["username"],
password=self.config["database_connection"]["password"],
host=self.config["database_connection"]["host"],
database=self.config["database_connection"]["database"])
else:
raise "Please, define database"
if self.config["testing"]:
database_helper.initialize(conn)
database_helper.add_test_products(conn)
updater = StockUpdater(conn)
updater.set_destination_colums(
self.config["database_connection"]["product_code_column"],
self.config["database_connection"]["quantity_column"])
updater.set_table(self.config["database_connection"]["products_table"])
for item in converter.rows:
logging.info("Updating product {}".format(item['product_code']))
try:
updater.update_quantity(item['product_code'], item['quantity'])
except ProductNotFoundError as e:
logging.warning("Product {} not found".format(item['product_code']))
except MultipleProductsFoundError as e:
logging.error("Multiple products found with product id {}".format(item['product_code']))
except sqlite3.ProgrammingError:
logging.error("Invalid product code: {}".format(item['product_code']))
| mit | -2,527,070,918,271,149,600 | 37.527778 | 104 | 0.621125 | false |
anchore/anchore | anchore/anchore-modules/analyzers/30_file_checksums.py | 1 | 2870 | #!/usr/bin/env python
import sys
import os
import shutil
import re
import json
import time
import rpm
import subprocess
import stat
import tarfile
import time
import hashlib
import anchore.anchore_utils
analyzer_name = "file_checksums"
try:
config = anchore.anchore_utils.init_analyzer_cmdline(sys.argv, analyzer_name)
except Exception as err:
print str(err)
sys.exit(1)
imgname = config['imgid']
imgid = config['imgid_full']
outputdir = config['dirs']['outputdir']
unpackdir = config['dirs']['unpackdir']
#if not os.path.exists(outputdir):
# os.makedirs(outputdir)
domd5 = True
dosha1 = False
outfiles_sha1 = {}
outfiles_md5 = {}
outfiles_sha256 = {}
meta = anchore.anchore_utils.get_distro_from_path('/'.join([unpackdir, "rootfs"]))
distrodict = anchore.anchore_utils.get_distro_flavor(meta['DISTRO'], meta['DISTROVERS'], likedistro=meta['LIKEDISTRO'])
if distrodict['flavor'] == "ALPINE":
dosha1 = True
try:
timer = time.time()
(tmp, allfiles) = anchore.anchore_utils.get_files_from_path(unpackdir + "/rootfs")
for name in allfiles.keys():
name = re.sub("^\.", "", name)
thefile = '/'.join([unpackdir, "rootfs", name])
csum = "DIRECTORY_OR_OTHER"
if os.path.isfile(thefile) and not os.path.islink(thefile):
if domd5:
try:
with open(thefile, 'r') as FH:
csum = hashlib.md5(FH.read()).hexdigest()
except:
csum = "DIRECTORY_OR_OTHER"
outfiles_md5[name] = csum
if dosha1:
try:
with open(thefile, 'r') as FH:
csum = hashlib.sha1(FH.read()).hexdigest()
except:
csum = "DIRECTORY_OR_OTHER"
outfiles_sha1[name] = csum
try:
with open(thefile, 'r') as FH:
csum = hashlib.sha256(FH.read()).hexdigest()
except:
csum = "DIRECTORY_OR_OTHER"
outfiles_sha256[name] = csum
else:
if domd5:
outfiles_md5[name] = "DIRECTORY_OR_OTHER"
if dosha1:
outfiles_sha1[name] = "DIRECTORY_OR_OTHER"
outfiles_sha256[name] = "DIRECTORY_OR_OTHER"
except Exception as err:
import traceback
traceback.print_exc()
print "ERROR: " + str(err)
raise err
if outfiles_sha1:
ofile = os.path.join(outputdir, 'files.sha1sums')
anchore.anchore_utils.write_kvfile_fromdict(ofile, outfiles_sha1)
if outfiles_md5:
ofile = os.path.join(outputdir, 'files.md5sums')
anchore.anchore_utils.write_kvfile_fromdict(ofile, outfiles_md5)
if outfiles_sha256:
ofile = os.path.join(outputdir, 'files.sha256sums')
anchore.anchore_utils.write_kvfile_fromdict(ofile, outfiles_sha256)
sys.exit(0)
| apache-2.0 | -8,097,181,110,161,826,000 | 26.333333 | 119 | 0.599652 | false |
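Stripped of the Anchore plumbing, the analyzer above is a per-file digest pass over an unpacked root filesystem, recording a placeholder for anything that is not a regular file. The standalone loop below shows the same pass; the scanned directory and the choice of digests are illustrative.

```python
# Standalone sketch of the per-file digest pass performed by the analyzer above.
# "./rootfs" is an assumed directory; swap in any path you want to scan.
import hashlib
import os

def file_digests(root):
    sums = {}
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            rel = os.path.relpath(path, root)
            if not os.path.isfile(path) or os.path.islink(path):
                sums[rel] = 'DIRECTORY_OR_OTHER'
                continue
            md5, sha256 = hashlib.md5(), hashlib.sha256()
            with open(path, 'rb') as fh:
                for chunk in iter(lambda: fh.read(1 << 20), b''):
                    md5.update(chunk)            # stream rather than slurp the file
                    sha256.update(chunk)
            sums[rel] = (md5.hexdigest(), sha256.hexdigest())
    return sums

if __name__ == '__main__':
    for rel, digest in sorted(file_digests('./rootfs').items()):
        print(rel, digest)
```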
GuozhuHe/webspider | webspider/tasks/actor/lagou_jobs_count.py | 1 | 1329 | # coding=utf-8
import logging
from datetime import datetime

from webspider import crawlers
from webspider.tasks.celery_app import celery_app
from webspider.controllers import keyword_ctl, job_keyword_ctl
from webspider.models import JobsCountModel

logger = logging.getLogger(__name__)


@celery_app.task()
def crawl_lagou_jobs_count_task():
    keyword_ids = job_keyword_ctl.get_most_frequently_keyword_ids(limit=1000)
    for keyword_id in keyword_ids:
        crawl_lagou_keyword_jobs_count_task.delay(keyword_id)


@celery_app.task()
def crawl_lagou_keyword_jobs_count_task(keyword_id):
    cities_name_map = {
        'all_city': u'全国',
        'beijing': u'北京',
        'shanghai': u'上海',
        'guangzhou': u'广州',
        'shenzhen': u'深圳',
        'hangzhou': u'杭州',
        'chengdu': u'成都',
    }
    keyword_name = keyword_ctl.get_keyword_name_by_id(keyword_id)
    jobs_count_dict = dict(keyword_id=keyword_id)
    for city_name_key, city_name in cities_name_map.items():
        jobs_count_dict[city_name_key] = crawlers.get_jobs_count_from_lagou(
            city_name=city_name, keyword_name=keyword_name)
    jobs_count_dict['date'] = int(datetime.today().strftime('%Y%m%d'))
    JobsCountModel.add(**jobs_count_dict)
| mit | -3,362,150,394,207,496,700 | 33.236842 | 102 | 0.641814 | false |
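The two tasks above form a common Celery fan-out: a scheduler task loads IDs and enqueues one worker task per ID with `.delay()`. Here is a minimal self-contained version of that shape; the broker URL and the hard-coded ID list are assumptions, not this project's configuration.

```python
# Minimal Celery fan-out sketch (scheduler task enqueues one worker task per id).
# The Redis broker URL and the id list are assumptions for illustration only.
from celery import Celery

app = Celery('fanout_demo', broker='redis://localhost:6379/0')

def load_ids():
    """Stand-in for job_keyword_ctl.get_most_frequently_keyword_ids()."""
    return [1, 2, 3]

@app.task
def crawl_all():
    for keyword_id in load_ids():
        crawl_one.delay(keyword_id)      # one queued task per keyword id

@app.task
def crawl_one(keyword_id):
    print('crawling keyword %d' % keyword_id)

if __name__ == '__main__':
    crawl_one.apply(args=(42,))          # runs locally, no broker/worker needed
```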
adamrp/qiita | qiita_db/test/test_job.py | 1 | 15935 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from os import remove, mkdir
from os.path import join, exists
from shutil import rmtree
from datetime import datetime
from qiita_core.util import qiita_test_checker
from qiita_db.job import Job, Command
from qiita_db.user import User
from qiita_db.util import get_mountpoint, get_count
from qiita_db.analysis import Analysis
from qiita_db.exceptions import (QiitaDBDuplicateError, QiitaDBStatusError,
QiitaDBUnknownIDError)
@qiita_test_checker()
class JobTest(TestCase):
"""Tests that the job object works as expected"""
def setUp(self):
self.job = Job(1)
self.options = {"option1": False, "option2": 25, "option3": "NEW"}
self._delete_path = []
self._delete_dir = []
_, self._job_folder = get_mountpoint("job")[0]
def tearDown(self):
# needs to be this way because map does not play well with remove and
# rmtree for python3
for item in self._delete_path:
remove(item)
for item in self._delete_dir:
rmtree(item)
def test_exists(self):
"""tests that existing job returns true"""
# need to insert matching sample data into analysis 2
self.conn_handler.execute(
"DELETE FROM qiita.analysis_sample WHERE analysis_id = 2")
self.conn_handler.execute(
"INSERT INTO qiita.analysis_sample "
"(analysis_id, processed_data_id, sample_id) VALUES "
"(2, 1,'1.SKB8.640193'), (2, 1,'1.SKD8.640184'), "
"(2, 1,'1.SKB7.640196'), (2, 1,'1.SKM9.640192'), "
"(2, 1,'1.SKM4.640180')")
self.assertTrue(Job.exists("18S", "Beta Diversity",
{"--otu_table_fp": 1,
"--mapping_fp": 1}, Analysis(1)))
def test_exists_return_jobid(self):
"""tests that existing job returns true"""
# need to insert matching sample data into analysis 2
self.conn_handler.execute(
"DELETE FROM qiita.analysis_sample WHERE analysis_id = 2")
self.conn_handler.execute(
"INSERT INTO qiita.analysis_sample "
"(analysis_id, processed_data_id, sample_id) VALUES "
"(2, 1,'1.SKB8.640193'), (2, 1,'1.SKD8.640184'), "
"(2, 1,'1.SKB7.640196'), (2, 1,'1.SKM9.640192'), "
"(2, 1,'1.SKM4.640180')")
exists, jid = Job.exists("18S", "Beta Diversity",
{"--otu_table_fp": 1, "--mapping_fp": 1},
Analysis(1), return_existing=True)
self.assertTrue(exists)
self.assertEqual(jid, Job(2))
def test_exists_noexist_options(self):
"""tests that non-existant job with bad options returns false"""
# need to insert matching sample data into analysis 2
# makes sure failure is because options and not samples
self.conn_handler.execute(
"DELETE FROM qiita.analysis_sample WHERE analysis_id = 2")
self.conn_handler.execute(
"INSERT INTO qiita.analysis_sample "
"(analysis_id, processed_data_id, sample_id) VALUES "
"(2, 1,'1.SKB8.640193'), (2, 1,'1.SKD8.640184'), "
"(2, 1,'1.SKB7.640196'), (2, 1,'1.SKM9.640192'), "
"(2, 1,'1.SKM4.640180')")
self.assertFalse(Job.exists("18S", "Beta Diversity",
{"--otu_table_fp": 1,
"--mapping_fp": 27}, Analysis(1)))
def test_exists_noexist_return_jobid(self):
"""tests that non-existant job with bad samples returns false"""
exists, jid = Job.exists(
"16S", "Beta Diversity",
{"--otu_table_fp": 1, "--mapping_fp": 27}, Analysis(1),
return_existing=True)
self.assertFalse(exists)
self.assertEqual(jid, None)
def test_get_commands(self):
exp = [
Command('Summarize Taxa', 'summarize_taxa_through_plots.py',
'{"--otu_table_fp":null}', '{}',
'{"--mapping_category":null, "--mapping_fp":null,'
'"--sort":null}', '{"--output_dir":null}'),
Command('Beta Diversity', 'beta_diversity_through_plots.py',
'{"--otu_table_fp":null,"--mapping_fp":null}', '{}',
'{"--tree_fp":null,"--color_by_all_fields":null,'
'"--seqs_per_sample":null}', '{"--output_dir":null}'),
Command('Alpha Rarefaction', 'alpha_rarefaction.py',
'{"--otu_table_fp":null,"--mapping_fp":null}', '{}',
'{"--tree_fp":null,"--num_steps":null,''"--min_rare_depth"'
':null,"--max_rare_depth":null,'
'"--retain_intermediate_files":false}',
'{"--output_dir":null}')
]
self.assertEqual(Job.get_commands(), exp)
def test_delete_files(self):
try:
Job.delete(1)
with self.assertRaises(QiitaDBUnknownIDError):
Job(1)
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id = 10")
self.assertEqual(obs, [])
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.job_results_filepath WHERE job_id = 1")
self.assertEqual(obs, [])
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.analysis_job WHERE job_id = 1")
self.assertEqual(obs, [])
self.assertFalse(exists(join(self._job_folder,
"1_job_result.txt")))
finally:
f = join(self._job_folder, "1_job_result.txt")
if not exists(f):
with open(f, 'w') as f:
f.write("job1result.txt")
def test_delete_folders(self):
try:
Job.delete(2)
with self.assertRaises(QiitaDBUnknownIDError):
Job(2)
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id = 11")
self.assertEqual(obs, [])
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.job_results_filepath WHERE job_id = 2")
self.assertEqual(obs, [])
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.analysis_job WHERE job_id = 2")
self.assertEqual(obs, [])
self.assertFalse(exists(join(self._job_folder, "2_test_folder")))
finally:
# put the test data back
basedir = self._job_folder
if not exists(join(basedir, "2_test_folder")):
mkdir(join(basedir, "2_test_folder"))
mkdir(join(basedir, "2_test_folder", "subdir"))
with open(join(basedir, "2_test_folder",
"testfile.txt"), 'w') as f:
f.write("DATA")
with open(join(basedir, "2_test_folder",
"testres.htm"), 'w') as f:
f.write("DATA")
with open(join(basedir, "2_test_folder",
"subdir", "subres.html"), 'w') as f:
f.write("DATA")
def test_create(self):
"""Makes sure creation works as expected"""
# make first job
new = Job.create("18S", "Alpha Rarefaction", {"opt1": 4}, Analysis(1))
self.assertEqual(new.id, 4)
# make sure job inserted correctly
obs = self.conn_handler.execute_fetchall("SELECT * FROM qiita.job "
"WHERE job_id = 4")
exp = [[4, 2, 1, 3, '{"opt1":4}', None]]
self.assertEqual(obs, exp)
# make sure job added to analysis correctly
obs = self.conn_handler.execute_fetchall("SELECT * FROM "
"qiita.analysis_job WHERE "
"job_id = 4")
exp = [[1, 4]]
self.assertEqual(obs, exp)
# make second job with diff datatype and command to test column insert
new = Job.create("16S", "Beta Diversity", {"opt1": 4}, Analysis(1))
self.assertEqual(new.id, 5)
# make sure job inserted correctly
obs = self.conn_handler.execute_fetchall("SELECT * FROM qiita.job "
"WHERE job_id = 5")
exp = [[5, 1, 1, 2, '{"opt1":4}', None]]
self.assertEqual(obs, exp)
# make sure job added to analysis correctly
obs = self.conn_handler.execute_fetchall("SELECT * FROM "
"qiita.analysis_job WHERE "
"job_id = 5")
exp = [[1, 5]]
self.assertEqual(obs, exp)
def test_create_exists(self):
"""Makes sure creation doesn't duplicate a job"""
with self.assertRaises(QiitaDBDuplicateError):
Job.create("18S", "Beta Diversity",
{"--otu_table_fp": 1, "--mapping_fp": 1},
Analysis(1))
def test_create_exists_return_existing(self):
"""Makes sure creation doesn't duplicate a job by returning existing"""
new_id = get_count("qiita.analysis") + 1
Analysis.create(User("[email protected]"), "new", "desc")
self.conn_handler.execute(
"INSERT INTO qiita.analysis_sample "
"(analysis_id, processed_data_id, sample_id) VALUES "
"({0}, 1, '1.SKB8.640193'), ({0}, 1, '1.SKD8.640184'), "
"({0}, 1, '1.SKB7.640196'), ({0}, 1, '1.SKM9.640192'), "
"({0}, 1, '1.SKM4.640180')".format(new_id))
new = Job.create("18S", "Beta Diversity",
{"--otu_table_fp": 1, "--mapping_fp": 1},
Analysis(new_id), return_existing=True)
self.assertEqual(new.id, 2)
def test_retrieve_datatype(self):
"""Makes sure datatype retrieval is correct"""
self.assertEqual(self.job.datatype, '18S')
def test_retrieve_command(self):
"""Makes sure command retrieval is correct"""
self.assertEqual(self.job.command, ['Summarize Taxa',
'summarize_taxa_through_plots.py'])
def test_retrieve_options(self):
self.assertEqual(self.job.options, {
'--otu_table_fp': 1,
'--output_dir': join(
self._job_folder,
'1_summarize_taxa_through_plots.py_output_dir')})
def test_set_options(self):
new = Job.create("18S", "Alpha Rarefaction", {"opt1": 4}, Analysis(1))
new.options = self.options
self.options['--output_dir'] = join(self._job_folder,
'4_alpha_rarefaction.'
'py_output_dir')
self.assertEqual(new.options, self.options)
def test_retrieve_results(self):
self.assertEqual(self.job.results, ["1_job_result.txt"])
def test_retrieve_results_folder(self):
job = Job(2)
self.assertEqual(job.results, ['2_test_folder/testres.htm',
'2_test_folder/subdir/subres.html'])
def test_retrieve_results_empty(self):
new = Job.create("18S", "Beta Diversity", {"opt1": 4}, Analysis(1))
self.assertEqual(new.results, [])
def test_set_error(self):
before = datetime.now()
self.job.set_error("TESTERROR")
after = datetime.now()
self.assertEqual(self.job.status, "error")
error = self.job.error
self.assertEqual(error.severity, 2)
self.assertEqual(error.msg, 'TESTERROR')
self.assertTrue(before < error.time < after)
def test_retrieve_error_blank(self):
self.assertEqual(self.job.error, None)
def test_set_error_completed(self):
self.job.status = "error"
with self.assertRaises(QiitaDBStatusError):
self.job.set_error("TESTERROR")
def test_retrieve_error_exists(self):
self.job.set_error("TESTERROR")
self.assertEqual(self.job.error.msg, "TESTERROR")
def test_add_results(self):
fp_count = get_count('qiita.filepath')
self.job.add_results([(join(self._job_folder, "1_job_result.txt"),
"plain_text")])
# make sure files attached to job properly
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.job_results_filepath WHERE job_id = 1")
self.assertEqual(obs, [[1, 10], [1, fp_count + 1]])
def test_add_results_dir(self):
fp_count = get_count('qiita.filepath')
# Create a test directory
test_dir = join(self._job_folder, "2_test_folder")
# add folder to job
self.job.add_results([(test_dir, "directory")])
# make sure files attached to job properly
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.job_results_filepath WHERE job_id = 1")
self.assertEqual(obs, [[1, 10], [1, fp_count + 1]])
def test_add_results_completed(self):
self.job.status = "completed"
with self.assertRaises(QiitaDBStatusError):
self.job.add_results([("/fake/dir/", "directory")])
@qiita_test_checker()
class CommandTest(TestCase):
def setUp(self):
com1 = Command('Summarize Taxa', 'summarize_taxa_through_plots.py',
'{"--otu_table_fp":null}', '{}',
'{"--mapping_category":null, "--mapping_fp":null,'
'"--sort":null}', '{"--output_dir":null}')
com2 = Command('Beta Diversity', 'beta_diversity_through_plots.py',
'{"--otu_table_fp":null,"--mapping_fp":null}', '{}',
'{"--tree_fp":null,"--color_by_all_fields":null,'
'"--seqs_per_sample":null}', '{"--output_dir":null}')
com3 = Command('Alpha Rarefaction', 'alpha_rarefaction.py',
'{"--otu_table_fp":null,"--mapping_fp":null}', '{}',
'{"--tree_fp":null,"--num_steps":null,'
'"--min_rare_depth"'
':null,"--max_rare_depth":null,'
'"--retain_intermediate_files":false}',
'{"--output_dir":null}')
self.all_comms = {
"16S": [com1, com2, com3],
"18S": [com1, com2, com3],
"ITS": [com2, com3],
"Proteomic": [com2, com3],
"Metabolomic": [com2, com3],
"Metagenomic": [com2, com3],
}
def test_get_commands_by_datatype(self):
obs = Command.get_commands_by_datatype()
self.assertEqual(obs, self.all_comms)
obs = Command.get_commands_by_datatype(["16S", "Metabolomic"])
exp = {k: self.all_comms[k] for k in ('16S', 'Metabolomic')}
self.assertEqual(obs, exp)
def test_equal(self):
commands = Command.create_list()
self.assertTrue(commands[1] == commands[1])
self.assertFalse(commands[1] == commands[2])
self.assertFalse(commands[1] == Job(1))
def test_not_equal(self):
commands = Command.create_list()
self.assertFalse(commands[1] != commands[1])
self.assertTrue(commands[1] != commands[2])
self.assertTrue(commands[1] != Job(1))
if __name__ == "__main__":
main()
| bsd-3-clause | -9,045,347,490,501,964,000 | 41.836022 | 79 | 0.527644 | false |
our-city-app/oca-backend | tools/replace_branding.py | 1 | 4088 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from __future__ import unicode_literals
import argparse
import base64
import logging
import os
import uuid
import zipfile
from StringIO import StringIO
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def call_rogerthat(server_url, api_key, data):
import urllib2
import json
data['id'] = str(uuid.uuid4())
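    # `data` is expected to be a JSON-RPC style dict, e.g. (illustrative):
    #   {"method": "system.publish_changes", "params": {}}
    # the request id generated above completes the payload before it is serialized.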
json_data = json.dumps(data)
headers = {
'Content-Type': 'application/json-rpc; charset=utf-8',
'X-Nuntiuz-API-key': api_key
}
request = urllib2.Request(server_url, json_data, headers)
response = urllib2.urlopen(request)
if response.getcode() != 200:
raise Exception(u'%s\n%s' % (response.getcode(), response.read()))
result = response.read()
result_dict = json.loads(result)
if result_dict['error']:
raise Exception(result_dict['error'])
return result_dict
def zip_folder(folder_path):
"""Zip the contents of an entire folder (with that folder included in the archive).
Empty subfolders will be included in the archive as well.
"""
os.chdir(folder_path)
parent_folder = os.path.dirname(folder_path)
contents = os.walk(folder_path)
stream = StringIO()
zip_file = zipfile.ZipFile(stream, 'w', zipfile.ZIP_DEFLATED)
try:
for root, folders, files in contents:
# Include all subfolders, including empty ones.
for folder_name in folders:
absolute_path = os.path.join(root, folder_name)
relative_path = absolute_path.replace(folder_path + os.sep, '')
zip_file.write(absolute_path, relative_path)
for file_name in files:
if os.path.splitext(file_name)[1] == '.zip':
continue
absolute_path = os.path.join(root, file_name)
relative_path = absolute_path.replace(folder_path + os.sep, '')
zip_file.write(absolute_path, relative_path)
finally:
zip_file.close()
stream.seek(0)
return stream.getvalue()
def replace_branding(server_url, api_key, branding_path, description):
if not os.path.exists(branding_path):
raise Exception('Path %s does not exist' % branding_path)
branding_name = description or os.path.basename(os.path.dirname(branding_path + os.sep))
if os.path.isdir(branding_path):
branding_zip_content = zip_folder(branding_path)
else:
with open(branding_path, 'r') as f:
branding_zip_content = f.read()
call_rogerthat(server_url, api_key, {
'method': 'system.replace_branding',
'params': {
'description': branding_name,
'content': base64.b64encode(branding_zip_content)
}
})
call_rogerthat(server_url, api_key, {
'method': 'system.publish_changes',
'params': {}
})
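# Example invocation (illustrative; the API key and branding path are placeholders):
#   python replace_branding.py -a <service-api-key> -b ./branding_folder -d "Main branding"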
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--api_key', help='API key of the service')
parser.add_argument('-b', '--branding', help='Folder where the branding is located, or branding zip')
parser.add_argument('-d', '--description', help='Description of the branding')
parser.add_argument('-u', '--server_url', help='Server url, only used for development',
default="https://rogerth.at/api/1")
args = parser.parse_args()
replace_branding(args.server_url, args.api_key, args.branding, args.description)
| apache-2.0 | 2,168,557,505,359,513,600 | 35.828829 | 105 | 0.648239 | false |
ruchee/vimrc | vimfiles/bundle/vim-rust-racer/rplugin/python3/deoplete/sources/racer.py | 1 | 4535 | #=============================================================================
# FILE: racer.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license {{{
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# }}}
#=============================================================================
import re
import os
import subprocess
import tempfile
from .base import Base
class Source(Base):
def __init__(self, vim):
Base.__init__(self, vim)
self.name = 'racer'
self.mark = '[racer]'
self.filetypes = ['rust']
self.input_pattern = r'(\.|::)\w*'
self.rank = 500
def on_init(self, context):
self.__racer = self.vim.call('racer#GetRacerCmd')
self.__executable_racer = self.vim.funcs.executable(self.__racer)
def get_complete_position(self, context):
if not self.__executable_racer:
return -1
        m = re.search(r'\w*$', context['input'])
return m.start() if m else -1
def gather_candidates(self, context):
typeMap = {
'Struct': 's', 'Module': 'M', 'Function': 'f',
'Crate': 'C', 'Let': 'v', 'StructField': 'm',
'Impl': 'i', 'Enum': 'e', 'EnumVariant': 'E',
'Type': 't', 'FnArg': 'v', 'Trait': 'T',
'Const': 'c'
}
candidates = []
insert_paren = int(self.vim.eval('g:racer_insert_paren'))
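        # The loop below assumes racer's "complete" output format, roughly:
        #   MATCH word,linenum,charnum,path,kind,signature...
        # field 0 is the completion word, field 4 its kind, and fields 5+
        # hold the signature/context used for the menu text.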
for line in [l[6:] for l
in self.get_results(context, 'complete',
context['complete_position'] + 1)
if l.startswith('MATCH')]:
completions = line.split(',')
kind = typeMap.get(completions[4], '')
completion = { 'kind': kind, 'word': completions[0] }
if kind == 'f': # function
completion['menu'] = ','.join(completions[5:]).replace(
'pub ', '').replace('fn ', '').rstrip('{')
if ' where ' in completion['menu'] or completion[
'menu'].endswith(' where') :
where = completion['menu'].rindex(' where')
completion['menu'] = completion['menu'][: where]
if insert_paren:
completion['abbr'] = completions[0]
completion['word'] += '('
elif kind == 's' : # struct
completion['menu'] = ','.join(completions[5:]).replace(
'pub ', '').replace( 'struct ', '').rstrip('{')
candidates.append(completion)
return candidates
def get_results(self, context, command, col):
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as tf:
tf.write("\n".join(self.vim.current.buffer))
tf.flush()
args = [
self.__racer, command,
str(self.vim.funcs.line('.')),
str(col - 1),
tf.name
] if command == 'prefix' else [
self.__racer, command,
str(self.vim.funcs.line('.')),
str(col - 1),
self.vim.current.buffer.name,
tf.name
]
try:
results = subprocess.check_output(args).decode(
context['encoding']).splitlines()
except subprocess.CalledProcessError:
return []
return results
| mit | -6,456,718,800,528,790,000 | 40.227273 | 78 | 0.523925 | false |
Micutio/ComplexAutomatonBase | cab/util/io_pygame.py | 1 | 3477 | """
This module contains a CAB io implementation in PyGame.
"""
import pygame
import pygame.gfxdraw
import pygame.locals
import math
import cab.abm.agent as cab_agent
import cab.ca.cell as cab_cell
import cab.util.io_pygame_input as cab_pygame_io
import cab.util.io_interface as cab_io
import cab.global_constants as cab_gc
import cab.util.logging as cab_log
__author__ = 'Michael Wagner'
class PygameIO(cab_io.IoInterface):
"""
This class incorporates all methods necessary for visualizing the simulation.
"""
def __init__(self, gc: cab_gc.GlobalConstants, cab_core):
"""
Initializes the visualization.
"""
super().__init__(gc, cab_core)
self.abm = cab_core.abm
self.ca = cab_core.ca
self.surface = None
self.io_handler = cab_pygame_io.InputHandler(cab_core)
# Initialize UI components.
pygame.init()
# pygame.display.init()
if self.gc.USE_HEX_CA:
offset_x = int((math.sqrt(3) / 2) * (self.gc.CELL_SIZE * 2) * (self.gc.DIM_X - 1))
offset_y = int((3 / 4) * (self.gc.CELL_SIZE * 2) * (self.gc.DIM_Y - 1))
# print(offset)
# self.screen = pygame.display.set_mode((self.gc.GRID_WIDTH, self.gc.GRID_HEIGHT), pygame.RESIZABLE, 32)
else:
offset_x = self.gc.CELL_SIZE * self.gc.DIM_X
offset_y = self.gc.CELL_SIZE * self.gc.DIM_Y
self.surface = pygame.display.set_mode((offset_x, offset_y), pygame.locals.HWSURFACE | pygame.locals.DOUBLEBUF, 32)
pygame.display.set_caption('Complex Automaton Base')
cab_log.trace("[PygameIO] initializing done")
def render_frame(self):
self.io_handler.process_input()
if self.gc.RUN_SIMULATION:
self.core.step_simulation()
draw_cell = self.draw_cell
for c in list(self.ca.ca_grid.values()):
draw_cell(c)
draw_agent = self.draw_agent
for a in self.abm.agent_set:
draw_agent(a)
pygame.display.flip()
# TODO: Change render_simulation to fit the whole simulation loop inside.
def render_simulation(self):
cab_log.trace("[PygameIO] start rendering simulation")
while True:
self.render_frame()
def draw_agent(self, agent: cab_agent.CabAgent):
"""
        Simple exemplary visualization: draw the agent as a filled, anti-aliased circle in the agent's own color.
"""
if agent.x is not None and agent.y is not None and not agent.dead:
radius = int(agent.size / 1.25)
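            # Grid-to-pixel conversion for the pointy-top hex layout assumed
            # here: a hex with circumradius CELL_SIZE is sqrt(3) * CELL_SIZE
            # wide, rows are spaced 3/4 of the hex height (1.5 * CELL_SIZE)
            # apart, and each row is shifted half a hex width via `offset`.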
horiz = self.gc.CELL_SIZE * 2 * (math.sqrt(3) / 2)
offset = agent.y * (horiz / 2)
x = int(agent.x * horiz) + int(offset)
vert = self.gc.CELL_SIZE * 2 * (3 / 4)
y = int(agent.y * vert)
pygame.draw.circle(self.surface, agent.color, (x, y), radius, 0)
pygame.gfxdraw.aacircle(self.surface, x, y, radius, (50, 100, 50))
def draw_cell(self, cell: cab_cell.CACell):
"""
        Simple exemplary visualization: draw the cell as a filled polygon in the cell's own color, optionally outlined with the grid color.
"""
if cell is None:
pass
else:
pygame.gfxdraw.filled_polygon(self.surface, cell.get_corners(), cell.color)
if self.gc.DISPLAY_GRID:
pygame.gfxdraw.aapolygon(self.surface, cell.get_corners(), self.gc.DEFAULT_GRID_COLOR)
else:
pygame.gfxdraw.aapolygon(self.surface, cell.get_corners(), cell.color)
return
| mit | 3,222,788,372,253,354,000 | 34.121212 | 123 | 0.597066 | false |
kinow-io/kinow-python-sdk | kinow_client/rest.py | 1 | 12352 | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.41
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import io
import json
import ssl
import certifi
import logging
import re
# python 2 and python 3 compatibility library
from six import PY3
from six.moves.urllib.parse import urlencode
from .configuration import Configuration
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""
Returns a dictionary of the response headers.
"""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""
Returns a given response header.
"""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, pools_size=4, maxsize=4):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680
# maxsize is the number of requests to host that are allowed in parallel
# ca_certs vs cert_file vs key_file
# http://stackoverflow.com/a/23957365/2985775
# cert_reqs
if Configuration().verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if Configuration().ssl_ca_cert:
ca_certs = Configuration().ssl_ca_cert
else:
# if not set certificate file, use Mozilla's root certificates.
ca_certs = certifi.where()
# cert_file
cert_file = Configuration().cert_file
# key file
key_file = Configuration().key_file
# https pool manager
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=cert_file,
key_file=key_file
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True, _request_timeout=None):
"""
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if PY3 else (int, long)):
timeout = urllib3.Timeout(total=_request_timeout)
elif isinstance(_request_timeout, tuple) and len(_request_timeout) == 2:
timeout = urllib3.Timeout(connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body:
request_body = json.dumps(body)
r = self.pool_manager.request(method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded':
r = self.pool_manager.request(method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct Content-Type
# which generated by urllib3 will be overwritten.
del headers['Content-Type']
r = self.pool_manager.request(method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is provided
# in serialized form
elif isinstance(body, str):
request_body = body
r = self.pool_manager.request(method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided arguments.
Please check that your arguments match declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# In the python 3, the response.data is bytes.
# we need to decode it to string.
if PY3:
r.data = r.data.decode('utf8')
# log response body
logger.debug("response body: %s", r.data)
if r.status not in range(200, 206):
raise ApiException(http_resp=r)
return r
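    # Illustrative usage of the verb helpers below (URL, header and query
    # values are placeholders, not part of the generated client):
    #   client = RESTClientObject()
    #   resp = client.GET("https://example.com/api/v1/items",
    #                     headers={"Accept": "application/json"},
    #                     query_params={"page": 1})
    #   payload = json.loads(resp.data)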
def GET(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None, _preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""
Custom error messages for exception
"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
| apache-2.0 | -1,410,129,561,356,911,400 | 40.589226 | 120 | 0.511253 | false |
eugeneai/icc.studprogs | src/icc/studprogs/textloader.py | 1 | 1198 | from icc.studprogs.common import *
class Loader(BaseLoader):
"""Loads text and divides it on [paragraph] lokens.
"""
def lines(self):
"""
        Yield decoded lines from the wrapped file, inserting paragraph and
        page symbols: consecutive blank lines collapse into a single
        paragraph symbol, and a form feed starts a new page (a bare page
        number directly before it is dropped).
"""
linequeue=[]
for line in self.file:
if self._skip>0:
self._skip-=1
continue
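            # A form feed marks a page break: the queued paragraph symbol (if
            # any) is dropped, a bare page-number line before it is discarded,
            # and a page symbol is queued in its place.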
if line.startswith(b"\x0c"):
line=line.lstrip(b"\x0c")
prev=linequeue.pop()
if prev==paragraph_symbol: # must be an empty string
prev=linequeue.pop()
try:
int(prev.strip())
except ValueError:
linequeue.append(prev)
else:
linequeue.append(prev)
linequeue.append(page_symbol)
uniline=line.decode(self.encoding)
if uniline.strip():
linequeue.append(uniline)
            elif not linequeue or linequeue[-1] != paragraph_symbol:
linequeue.append(paragraph_symbol)
if len(linequeue)<10:
continue
yield linequeue.pop(0)
yield from linequeue
| gpl-3.0 | 1,462,403,996,640,601,300 | 26.72093 | 69 | 0.457215 | false |
chrisgilmerproj/brewday | brew/yeasts.py | 1 | 2678 | # -*- coding: utf-8 -*-
import json
import sys
import textwrap
from .exceptions import YeastException
from .validators import validate_optional_fields
from .validators import validate_percentage
from .validators import validate_required_fields
__all__ = [u"Yeast"]
class Yeast(object):
"""
A representation of a type of Yeast as added to a Recipe.
"""
def __init__(self, name, percent_attenuation=0.75):
"""
        :param float percent_attenuation: The percentage of sugar in the wort that the yeast is expected to attenuate (convert to alcohol)
:raises YeastException: If percent_attenuation is not provided
""" # noqa
self.name = name
if percent_attenuation is None:
raise YeastException(
u"{}: Must provide percent attenuation".format(self.name) # noqa
)
self.percent_attenuation = validate_percentage(percent_attenuation)
def __str__(self):
if sys.version_info[0] >= 3:
return self.__unicode__()
else:
return self.__unicode__().encode(u"utf8")
def __unicode__(self):
return u"{0}, attenuation {1:0.1%}".format(
self.name.capitalize(), self.percent_attenuation
)
def __repr__(self):
out = u"{0}('{1}'".format(type(self).__name__, self.name)
if self.percent_attenuation:
out = u"{0}, percent_attenuation={1}".format(out, self.percent_attenuation)
out = u"{0})".format(out)
return out
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if (self.name == other.name) and (
self.percent_attenuation == other.percent_attenuation
):
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def to_dict(self):
return {
u"name": self.name,
u"data": {u"percent_attenuation": self.percent_attenuation},
}
def to_json(self):
return json.dumps(self.to_dict(), sort_keys=True)
@classmethod
def validate(cls, yeast_data):
required_fields = [(u"name", str)]
optional_fields = [(u"percent_attenuation", float)]
validate_required_fields(yeast_data, required_fields)
validate_optional_fields(yeast_data, optional_fields)
def format(self):
msg = textwrap.dedent(
u"""\
{name} Yeast
-----------------------------------
Attenuation: {data[percent_attenuation]:0.1%}""".format(
**self.to_dict()
)
)
return msg
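# Example usage (illustrative):
#   yeast = Yeast(u'Wyeast 1056', percent_attenuation=0.75)
#   print(yeast.format())  # prints the name header and the 75.0% attenuation line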
| mit | -5,421,846,657,522,095,000 | 30.139535 | 132 | 0.567588 | false |
mikrosimage/rez | src/rez/resolver.py | 1 | 16766 | from rez.solver import Solver, SolverStatus, PackageVariantCache
from rez.package_repository import package_repository_manager
from rez.packages_ import get_variant, get_last_release_time
from rez.package_filter import PackageFilterList, TimestampRule
from rez.utils.memcached import memcached_client, pool_memcached_connections
from rez.utils.logging_ import log_duration
from rez.config import config
from rez.vendor.enum import Enum
from contextlib import contextmanager
from hashlib import sha1
import os
class ResolverStatus(Enum):
""" Enum to represent the current state of a resolver instance. The enum
also includes a human readable description of what the state represents.
"""
pending = ("The resolve has not yet started.", )
solved = ("The resolve has completed successfully.", )
failed = ("The resolve is not possible.", )
aborted = ("The resolve was stopped by the user (via callback).", )
def __init__(self, description):
self.description = description
class Resolver(object):
"""The package resolver.
The Resolver uses a combination of Solver(s) and cache(s) to resolve a
package request as quickly as possible.
"""
def __init__(self, package_requests, package_paths, package_filter=None,
package_orderers=None, timestamp=0, callback=None, building=False,
verbosity=False, buf=None, package_load_callback=None, caching=True):
"""Create a Resolver.
Args:
package_requests: List of Requirement objects representing the
request.
package_paths: List of paths to search for pkgs.
package_filter (`PackageFilterList`): Package filter.
package_orderers (list of `PackageOrder`): Custom package ordering.
callback: See `Solver`.
package_load_callback: If not None, this callable will be called
prior to each package being loaded. It is passed a single
`Package` object.
building: True if we're resolving for a build.
caching: If True, cache(s) may be used to speed the resolve. If
False, caches will not be used.
"""
self.package_requests = package_requests
self.package_paths = package_paths
self.timestamp = timestamp
self.callback = callback
self.package_orderers = package_orderers
self.package_load_callback = package_load_callback
self.building = building
self.verbosity = verbosity
self.caching = caching
self.buf = buf
# store hash of package orderers. This is used in the memcached key
if package_orderers:
sha1s = ''.join(x.sha1 for x in package_orderers)
self.package_orderers_hash = sha1(sha1s).hexdigest()
else:
self.package_orderers_hash = ''
# store hash of pre-timestamp-combined package filter. This is used in
# the memcached key
if package_filter:
self.package_filter_hash = package_filter.sha1
else:
self.package_filter_hash = ''
# combine timestamp and package filter into single filter
if self.timestamp:
if package_filter:
self.package_filter = package_filter.copy()
else:
self.package_filter = PackageFilterList()
rule = TimestampRule.after(self.timestamp)
self.package_filter.add_exclusion(rule)
else:
self.package_filter = package_filter
self.status_ = ResolverStatus.pending
self.resolved_packages_ = None
self.failure_description = None
self.graph_ = None
self.from_cache = False
self.memcached_servers = config.memcached_uri if config.resolve_caching else None
self.solve_time = 0.0 # time spent solving
self.load_time = 0.0 # time spent loading package resources
self._print = config.debug_printer("resolve_memcache")
@pool_memcached_connections
def solve(self):
"""Perform the solve.
"""
with log_duration(self._print, "memcache get (resolve) took %s"):
solver_dict = self._get_cached_solve()
if solver_dict:
self.from_cache = True
self._set_result(solver_dict)
else:
self.from_cache = False
solver = self._solve()
solver_dict = self._solver_to_dict(solver)
self._set_result(solver_dict)
with log_duration(self._print, "memcache set (resolve) took %s"):
self._set_cached_solve(solver_dict)
@property
def status(self):
"""Return the current status of the resolve.
Returns:
ResolverStatus.
"""
return self.status_
@property
def resolved_packages(self):
"""Get the list of resolved packages.
Returns:
List of `PackageVariant` objects, or None if the resolve has not
completed.
"""
return self.resolved_packages_
@property
def graph(self):
"""Return the resolve graph.
The resolve graph shows unsuccessful as well as successful resolves.
Returns:
A pygraph.digraph object, or None if the solve has not completed.
"""
return self.graph_
def _get_cached_solve(self):
"""Find a memcached resolve.
If there is NOT a resolve timestamp:
- fetch a non-timestamped memcache entry;
- if no entry, then fail;
- if packages have changed, then:
- delete the entry;
- fail;
- if no packages in the entry have been released since, then
- use the entry and return;
- else:
- delete the entry;
- fail.
If there IS a resolve timestamp (let us call this T):
- fetch a non-timestamped memcache entry;
- if entry then:
- if no packages have changed, then:
- if no packages in the entry have been released since:
- if no packages in the entry were released after T, then
- use the entry and return;
- else:
- delete the entry;
- else:
- delete the entry;
- fetch a timestamped (T) memcache entry;
- if no entry, then fail;
- if packages have changed, then:
- delete the entry;
- fail;
- else:
- use the entry.
This behaviour exists specifically so that resolves that use a
timestamp but set that to the current time, can be reused by other
resolves if nothing has changed. Older resolves however, can only be
reused if the timestamp matches exactly (but this might happen a lot -
consider a workflow where a work area is tied down to a particular
timestamp in order to 'lock' it from any further software releases).
"""
if not (self.caching and self.memcached_servers):
return None
# these caches avoids some potentially repeated file stats
variant_states = {}
last_release_times = {}
def _hit(data):
solver_dict, _, _ = data
return solver_dict
def _miss():
self._print("No cache key retrieved")
return None
def _delete_cache_entry(key):
with self._memcached_client() as client:
client.delete(key)
self._print("Discarded entry: %r", key)
def _retrieve(timestamped):
key = self._memcache_key(timestamped=timestamped)
self._print("Retrieving memcache key: %r", key)
with self._memcached_client() as client:
data = client.get(key)
return key, data
def _packages_changed(key, data):
solver_dict, _, variant_states_dict = data
for variant_handle in solver_dict.get("variant_handles", []):
variant = get_variant(variant_handle)
old_state = variant_states_dict.get(variant.name)
new_state = variant_states.get(variant)
if new_state is None:
repo = variant.resource._repository
new_state = repo.get_variant_state_handle(variant.resource)
variant_states[variant] = new_state
if old_state != new_state:
self._print("%r has been modified", variant.qualified_name)
return True
return False
def _releases_since_solve(key, data):
_, release_times_dict, _ = data
for package_name, release_time in release_times_dict.iteritems():
time_ = last_release_times.get(package_name)
if time_ is None:
time_ = get_last_release_time(package_name, self.package_paths)
last_release_times[package_name] = time_
if time_ != release_time:
self._print(
"A newer version of %r (%d) has been released since the "
"resolve was cached (latest release in cache was %d) "
"(entry: %r)", package_name, time_, release_time, key)
return True
return False
def _timestamp_is_earlier(key, data):
_, release_times_dict, _ = data
for package_name, release_time in release_times_dict.iteritems():
if self.timestamp < release_time:
self._print("Resolve timestamp (%d) is earlier than %r in "
"solve (%d) (entry: %r)", self.timestamp,
package_name, release_time, key)
return True
return False
key, data = _retrieve(False)
if self.timestamp:
if data:
if _packages_changed(key, data) or _releases_since_solve(key, data):
_delete_cache_entry(key)
elif not _timestamp_is_earlier(key, data):
return _hit(data)
key, data = _retrieve(True)
if not data:
return _miss()
if _packages_changed(key, data):
_delete_cache_entry(key)
return _miss()
else:
return _hit(data)
else:
if not data:
return _miss()
if _packages_changed(key, data) or _releases_since_solve(key, data):
_delete_cache_entry(key)
return _miss()
else:
return _hit(data)
@contextmanager
def _memcached_client(self):
with memcached_client(self.memcached_servers,
debug=config.debug_memcache) as client:
yield client
def _set_cached_solve(self, solver_dict):
"""Store a solve to memcached.
If there is NOT a resolve timestamp:
- store the solve to a non-timestamped entry.
If there IS a resolve timestamp (let us call this T):
- if NO newer package in the solve has been released since T,
- then store the solve to a non-timestamped entry;
- else:
- store the solve to a timestamped entry.
"""
if self.status_ != ResolverStatus.solved:
return # don't cache failed solves
if not (self.caching and self.memcached_servers):
return
# most recent release times get stored with solve result in the cache
releases_since_solve = False
release_times_dict = {}
variant_states_dict = {}
for variant in self.resolved_packages_:
time_ = get_last_release_time(variant.name, self.package_paths)
# don't cache if a release time isn't known
if time_ == 0:
self._print("Did not send memcache key: a repository could "
"not provide a most recent release time for %r",
variant.name)
return
if self.timestamp and self.timestamp < time_:
releases_since_solve = True
release_times_dict[variant.name] = time_
repo = variant.resource._repository
variant_states_dict[variant.name] = \
repo.get_variant_state_handle(variant.resource)
timestamped = (self.timestamp and releases_since_solve)
key = self._memcache_key(timestamped=timestamped)
data = (solver_dict, release_times_dict, variant_states_dict)
with self._memcached_client() as client:
client.set(key, data)
self._print("Sent memcache key: %r", key)
def _memcache_key(self, timestamped=False):
"""Makes a key suitable as a memcache entry."""
request = tuple(map(str, self.package_requests))
repo_ids = []
for path in self.package_paths:
repo = package_repository_manager.get_repository(path)
repo_ids.append(repo.uid)
t = ["resolve",
request,
tuple(repo_ids),
self.package_filter_hash,
self.package_orderers_hash,
self.building,
config.prune_failed_graph]
if timestamped and self.timestamp:
t.append(self.timestamp)
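        # The final key is simply str() of the assembled tuple, roughly
        # (illustrative values):
        #   "('resolve', ('foo-1.2', 'bah'), (<repo uids>), '<filter sha1>',
        #    '<orderers sha1>', False, True)"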
return str(tuple(t))
def _solve(self):
solver = Solver(package_requests=self.package_requests,
package_paths=self.package_paths,
package_filter=self.package_filter,
package_orderers=self.package_orderers,
callback=self.callback,
package_load_callback=self.package_load_callback,
building=self.building,
verbosity=self.verbosity,
prune_unfailed=config.prune_failed_graph,
buf=self.buf)
solver.solve()
return solver
def _set_result(self, solver_dict):
self.status_ = solver_dict.get("status")
self.graph_ = solver_dict.get("graph")
self.solve_time = solver_dict.get("solve_time")
self.load_time = solver_dict.get("load_time")
self.failure_description = solver_dict.get("failure_description")
self.resolved_packages_ = None
if self.status_ == ResolverStatus.solved:
# convert solver.Variants to packages.Variants
self.resolved_packages_ = []
for variant_handle in solver_dict.get("variant_handles", []):
variant = get_variant(variant_handle)
self.resolved_packages_.append(variant)
@classmethod
def _solver_to_dict(cls, solver):
graph_ = solver.get_graph()
solve_time = solver.solve_time
load_time = solver.load_time
failure_description = None
variant_handles = None
st = solver.status
if st == SolverStatus.unsolved:
status_ = ResolverStatus.aborted
failure_description = solver.abort_reason
elif st == SolverStatus.failed:
status_ = ResolverStatus.failed
failure_description = solver.failure_description()
elif st == SolverStatus.solved:
status_ = ResolverStatus.solved
variant_handles = []
for solver_variant in solver.resolved_packages:
variant_handle_dict = solver_variant.handle
variant_handles.append(variant_handle_dict)
return dict(
status=status_,
graph=graph_,
solve_time=solve_time,
load_time=load_time,
failure_description=failure_description,
variant_handles=variant_handles)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 | -3,009,298,589,355,116,500 | 37.366133 | 89 | 0.579983 | false |
valentin-krasontovitsch/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py | 1 | 92306 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine
version_added: "2.1"
short_description: Manage Azure virtual machines.
description:
- Create, update, stop and start a virtual machine. Provide an existing storage account and network interface or
allow the module to create these for you. If you choose not to provide a network interface, the resource group
must contain a virtual network with at least one subnet.
- Before Ansible 2.5, this required an image found in the Azure Marketplace which can be discovered with
M(azure_rm_virtualmachineimage_facts). In Ansible 2.5 and newer, custom images can be used as well, see the
examples for more details.
- If you need to use the I(custom_data) option, many images in the marketplace are not cloud-init ready. Thus, data
sent to I(custom_data) would be ignored. If the image you are attempting to use is not listed in
U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init#cloud-init-overview),
follow these steps U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cloudinit-prepare-custom-image).
options:
resource_group:
description:
- Name of the resource group containing the virtual machine.
required: true
name:
description:
- Name of the virtual machine.
required: true
custom_data:
description:
- Data which is made available to the virtual machine and used by e.g., cloud-init.
version_added: "2.5"
state:
description:
- Assert the state of the virtual machine.
- State 'present' will check that the machine exists with the requested configuration. If the configuration
of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power
state.
- State 'absent' will remove the virtual machine.
default: present
choices:
- absent
- present
started:
description:
- Use with state 'present' to start the machine. Set to false to have the machine be 'stopped'.
default: true
type: bool
allocated:
description:
- Toggle that controls if the machine is allocated/deallocated, only useful with state='present'.
default: True
type: bool
generalized:
description:
- Use with state 'present' to generalize the machine. Set to true to generalize the machine.
- Please note that this operation is irreversible.
type: bool
version_added: "2.8"
restarted:
description:
- Use with state 'present' to restart a running VM.
type: bool
location:
description:
- Valid Azure location. Defaults to location of the resource group.
short_hostname:
description:
- Name assigned internally to the host. On a linux VM this is the name returned by the `hostname` command.
When creating a virtual machine, short_hostname defaults to name.
vm_size:
description:
- A valid Azure VM size value. For example, 'Standard_D4'. The list of choices varies depending on the
subscription and location. Check your subscription for available choices. Required when creating a VM.
admin_username:
description:
- Admin username used to access the host after it is created. Required when creating a VM.
admin_password:
description:
- Password for the admin username. Not required if the os_type is Linux and SSH password authentication
is disabled by setting ssh_password_enabled to false.
ssh_password_enabled:
description:
- When the os_type is Linux, setting ssh_password_enabled to false will disable SSH password authentication
and require use of SSH keys.
default: true
type: bool
ssh_public_keys:
description:
- "For os_type Linux provide a list of SSH keys. Each item in the list should be a dictionary where the
dictionary contains two keys: path and key_data. Set the path to the default location of the
authorized_keys files. On an Enterprise Linux host, for example, the path will be
/home/<admin username>/.ssh/authorized_keys. Set key_data to the actual value of the public key."
image:
description:
- Specifies the image used to build the VM.
- If a string, the image is sourced from a custom image based on the
name.
- 'If a dict with the keys C(publisher), C(offer), C(sku), and
C(version), the image is sourced from a Marketplace image. NOTE:
set image.version to C(latest) to get the most recent version of a
given image.'
- 'If a dict with the keys C(name) and C(resource_group), the image
is sourced from a custom image based on the C(name) and
C(resource_group) set. NOTE: the key C(resource_group) is optional
and if omitted, all images in the subscription will be searched
for by C(name).'
- Custom image support was added in Ansible 2.5
required: true
availability_set:
description:
- Name or ID of an existing availability set to add the VM to. The availability_set should be in the same resource group as VM.
version_added: "2.5"
storage_account_name:
description:
- Name of an existing storage account that supports creation of VHD blobs. If not specified for a new VM,
a new storage account named <vm name>01 will be created using storage type 'Standard_LRS'.
aliases:
- storage_account
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs. If no name is specified a
              default container will be created.
default: vhds
aliases:
- storage_container
storage_blob_name:
description:
- Name of the storage blob used to hold the VM's OS disk image. If no name is provided, defaults to
the VM name + '.vhd'. If you provide a name, it must end with '.vhd'
aliases:
- storage_blob
managed_disk_type:
description:
- Managed OS disk type
choices:
- Standard_LRS
- StandardSSD_LRS
- Premium_LRS
version_added: "2.4"
os_disk_name:
description:
- OS disk name
version_added: "2.8"
os_disk_caching:
description:
- Type of OS disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
aliases:
- disk_caching
os_disk_size_gb:
description:
- Type of OS disk size in GB.
version_added: "2.7"
os_type:
description:
- Base type of operating system.
choices:
- Windows
- Linux
default: Linux
data_disks:
description:
- Describes list of data disks.
version_added: "2.4"
suboptions:
lun:
description:
- The logical unit number for data disk
default: 0
version_added: "2.4"
disk_size_gb:
description:
- The initial disk size in GB for blank data disks
version_added: "2.4"
managed_disk_type:
description:
- Managed data disk type
choices:
- Standard_LRS
- StandardSSD_LRS
- Premium_LRS
version_added: "2.4"
storage_account_name:
description:
- Name of an existing storage account that supports creation of VHD blobs. If not specified for a new VM,
a new storage account named <vm name>01 will be created using storage type 'Standard_LRS'.
version_added: "2.4"
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs. If no name is specified a
                      default container will be created.
default: vhds
version_added: "2.4"
storage_blob_name:
description:
                    - Name of the storage blob used to hold the VM's OS disk image. If no name is provided, defaults to
the VM name + '.vhd'. If you provide a name, it must end with '.vhd'
version_added: "2.4"
caching:
description:
- Type of data disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
version_added: "2.4"
public_ip_allocation_method:
description:
- If a public IP address is created when creating the VM (because a Network Interface was not provided),
determines if the public IP address remains permanently associated with the Network Interface. If set
to 'Dynamic' the public IP address may change any time the VM is rebooted or power cycled.
- The C(Disabled) choice was added in Ansible 2.6.
choices:
- Dynamic
- Static
- Disabled
default: Static
aliases:
- public_ip_allocation
open_ports:
description:
- If a network interface is created when creating the VM, a security group will be created as well. For
Linux hosts a rule will be added to the security group allowing inbound TCP connections to the default
SSH port 22, and for Windows hosts ports 3389 and 5986 will be opened. Override the default open ports by
providing a list of ports.
network_interface_names:
description:
- List of existing network interface names to add to the VM.
- Item can be a str of name or resource id of the network interface.
- Item can also be a dict contains C(resource_group) and C(name) of the network interface.
- If a network interface name is not provided when the VM is created, a default network interface will be created.
- In order for the module to create a new network interface, at least one Virtual Network with one Subnet must exist.
aliases:
- network_interfaces
virtual_network_resource_group:
description:
- When creating a virtual machine, if a specific virtual network from another resource group should be
used, use this parameter to specify the resource group to use.
version_added: "2.4"
virtual_network_name:
description:
- When creating a virtual machine, if a network interface name is not provided, one will be created.
- The network interface will be assigned to the first virtual network found in the resource group.
- Use this parameter to provide a specific virtual network instead.
            - If the virtual network is in another resource group, specify the resource group with C(virtual_network_resource_group).
aliases:
- virtual_network
subnet_name:
description:
- When creating a virtual machine, if a network interface name is not provided, one will be created.
- The new network interface will be assigned to the first subnet found in the virtual network.
- Use this parameter to provide a specific subnet instead.
            - If the subnet is in another resource group, specify the resource group with C(virtual_network_resource_group).
aliases:
- subnet
remove_on_absent:
description:
- "When removing a VM using state 'absent', also remove associated resources."
- "It can be 'all' or 'all_autocreated' or a list with any of the following: ['network_interfaces', 'virtual_storage', 'public_ips']."
- "To remove all resources referred by VM use 'all'."
- "To remove all resources that were automatically created while provisioning VM use 'all_autocreated'."
- Any other input will be ignored.
default: ['all']
plan:
description:
- A dictionary describing a third-party billing plan for an instance
version_added: 2.5
suboptions:
name:
description:
- billing plan name
required: true
product:
description:
- product name
required: true
publisher:
description:
- publisher offering the plan
required: true
promotion_code:
description:
- optional promotion code
accept_terms:
description:
- Accept terms for marketplace images that require it
- Only Azure service admin/account admin users can purchase images from the marketplace
type: bool
default: false
version_added: "2.7"
zones:
description:
- A list of Availability Zones for your virtual machine
type: list
version_added: "2.8"
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create VM with defaults
azure_rm_virtualmachine:
resource_group: Testing
name: testvm10
admin_username: chouseknecht
admin_password: <your password here>
image:
offer: CentOS
publisher: OpenLogic
sku: '7.1'
version: latest
- name: Create an availability set for managed disk vm
azure_rm_availabilityset:
name: avs-managed-disk
resource_group: Testing
platform_update_domain_count: 5
platform_fault_domain_count: 2
sku: Aligned
- name: Create a VM with managed disk
azure_rm_virtualmachine:
resource_group: Testing
name: vm-managed-disk
admin_username: adminUser
availability_set: avs-managed-disk
managed_disk_type: Standard_LRS
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
vm_size: Standard_D4
- name: Create a VM with existing storage account and NIC
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
vm_size: Standard_D4
storage_account: testaccount001
admin_username: adminUser
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
network_interfaces: testvm001
image:
offer: CentOS
publisher: OpenLogic
sku: '7.1'
version: latest
- name: Create a VM with OS and multiple data managed disks
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_D4
managed_disk_type: Standard_LRS
admin_username: adminUser
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
data_disks:
- lun: 0
disk_size_gb: 64
managed_disk_type: Standard_LRS
- lun: 1
disk_size_gb: 128
managed_disk_type: Premium_LRS
- name: Create a VM with OS and multiple data storage accounts
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_DS1_v2
admin_username: adminUser
ssh_password_enabled: false
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
network_interfaces: testvm001
storage_container: osdisk
storage_blob: osdisk.vhd
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
data_disks:
- lun: 0
disk_size_gb: 64
storage_container_name: datadisk1
storage_blob_name: datadisk1.vhd
- lun: 1
disk_size_gb: 128
storage_container_name: datadisk2
storage_blob_name: datadisk2.vhd
- name: Create a VM with a custom image
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_DS1_v2
admin_username: adminUser
admin_password: password01
image: customimage001
- name: Create a VM with a custom image from a particular resource group
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_DS1_v2
admin_username: adminUser
admin_password: password01
image:
name: customimage001
resource_group: Testing
- name: Create VM with specified OS disk size
azure_rm_virtualmachine:
resource_group: Testing
name: big-os-disk
admin_username: chouseknecht
admin_password: <your password here>
os_disk_size_gb: 512
image:
offer: CentOS
publisher: OpenLogic
sku: '7.1'
version: latest
- name: Create VM with OS and Plan, accepting the terms
azure_rm_virtualmachine:
resource_group: Testing
name: f5-nva
admin_username: chouseknecht
admin_password: <your password here>
image:
publisher: f5-networks
offer: f5-big-ip-best
sku: f5-bigip-virtual-edition-200m-best-hourly
version: latest
plan:
name: f5-bigip-virtual-edition-200m-best-hourly
product: f5-big-ip-best
publisher: f5-networks
- name: Power Off
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
started: no
- name: Deallocate
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
allocated: no
- name: Power On
azure_rm_virtualmachine:
    resource_group: Testing
name: testvm002
- name: Restart
azure_rm_virtualmachine:
    resource_group: Testing
name: testvm002
restarted: yes
- name: Create a VM with an Availability Zone
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_DS1_v2
admin_username: adminUser
admin_password: password01
image: customimage001
zones: [1]
- name: Remove a VM and all resources that were autocreated
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
remove_on_absent: all_autocreated
state: absent
'''
RETURN = '''
powerstate:
description: Indicates if the state is running, stopped, deallocated, generalized
returned: always
type: str
example: running
deleted_vhd_uris:
description: List of deleted Virtual Hard Disk URIs.
returned: 'on delete'
type: list
example: ["https://testvm104519.blob.core.windows.net/vhds/testvm10.vhd"]
deleted_network_interfaces:
description: List of deleted NICs.
returned: 'on delete'
type: list
example: ["testvm1001"]
deleted_public_ips:
description: List of deleted public IP address names.
returned: 'on delete'
type: list
example: ["testvm1001"]
azure_vm:
description: Facts about the current state of the object. Note that facts are not part of the registered output but available directly.
returned: always
type: complex
contains: {
"properties": {
"availabilitySet": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Compute/availabilitySets/MYAVAILABILITYSET"
},
"hardwareProfile": {
"vmSize": "Standard_D1"
},
"instanceView": {
"disks": [
{
"name": "testvm10.vhd",
"statuses": [
{
"code": "ProvisioningState/succeeded",
"displayStatus": "Provisioning succeeded",
"level": "Info",
"time": "2016-03-30T07:11:16.187272Z"
}
]
}
],
"statuses": [
{
"code": "ProvisioningState/succeeded",
"displayStatus": "Provisioning succeeded",
"level": "Info",
"time": "2016-03-30T20:33:38.946916Z"
},
{
"code": "PowerState/running",
"displayStatus": "VM running",
"level": "Info"
}
],
"vmAgent": {
"extensionHandlers": [],
"statuses": [
{
"code": "ProvisioningState/succeeded",
"displayStatus": "Ready",
"level": "Info",
"message": "GuestAgent is running and accepting new configurations.",
"time": "2016-03-30T20:31:16.000Z"
}
],
"vmAgentVersion": "WALinuxAgent-2.0.16"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01",
"name": "testvm10_NIC01",
"properties": {
"dnsSettings": {
"appliedDnsServers": [],
"dnsServers": []
},
"enableIPForwarding": false,
"ipConfigurations": [
{
"etag": 'W/"041c8c2a-d5dd-4cd7-8465-9125cfbe2cf8"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default",
"name": "default",
"properties": {
"privateIPAddress": "10.10.0.5",
"privateIPAllocationMethod": "Dynamic",
"provisioningState": "Succeeded",
"publicIPAddress": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/testvm10_PIP01",
"name": "testvm10_PIP01",
"properties": {
"idleTimeoutInMinutes": 4,
"ipAddress": "13.92.246.197",
"ipConfiguration": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default"
},
"provisioningState": "Succeeded",
"publicIPAllocationMethod": "Static",
"resourceGuid": "3447d987-ca0d-4eca-818b-5dddc0625b42"
}
}
}
}
],
"macAddress": "00-0D-3A-12-AA-14",
"primary": true,
"provisioningState": "Succeeded",
"resourceGuid": "10979e12-ccf9-42ee-9f6d-ff2cc63b3844",
"virtualMachine": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Compute/virtualMachines/testvm10"
}
}
}
]
},
"osProfile": {
"adminUsername": "chouseknecht",
"computerName": "test10",
"linuxConfiguration": {
"disablePasswordAuthentication": false
},
"secrets": []
},
"provisioningState": "Succeeded",
"storageProfile": {
"dataDisks": [
{
"caching": "ReadWrite",
"createOption": "empty",
"diskSizeGB": 64,
"lun": 0,
"name": "datadisk1.vhd",
"vhd": {
"uri": "https://testvm10sa1.blob.core.windows.net/datadisk/datadisk1.vhd"
}
}
],
"imageReference": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "7.1.20160308"
},
"osDisk": {
"caching": "ReadOnly",
"createOption": "fromImage",
"name": "testvm10.vhd",
"osType": "Linux",
"vhd": {
"uri": "https://testvm10sa1.blob.core.windows.net/vhds/testvm10.vhd"
}
}
}
},
"type": "Microsoft.Compute/virtualMachines"
}
''' # NOQA
import base64
import random
import re
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.basic import to_native, to_bytes
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict, normalize_location_name, format_resource_id
AZURE_OBJECT_CLASS = 'VirtualMachine'
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
def extract_names_from_blob_uri(blob_uri, storage_suffix):
# HACK: ditch this once python SDK supports get by URI
m = re.match(r'^https://(?P<accountname>[^.]+)\.blob\.{0}/'
r'(?P<containername>[^/]+)/(?P<blobname>.+)$'.format(storage_suffix), blob_uri)
if not m:
raise Exception("unable to parse blob uri '%s'" % blob_uri)
extracted_names = m.groupdict()
return extracted_names
class AzureRMVirtualMachine(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
custom_data=dict(type='str'),
state=dict(choices=['present', 'absent'], default='present', type='str'),
location=dict(type='str'),
short_hostname=dict(type='str'),
vm_size=dict(type='str'),
admin_username=dict(type='str'),
admin_password=dict(type='str', no_log=True),
ssh_password_enabled=dict(type='bool', default=True),
ssh_public_keys=dict(type='list'),
image=dict(type='raw'),
availability_set=dict(type='str'),
storage_account_name=dict(type='str', aliases=['storage_account']),
storage_container_name=dict(type='str', aliases=['storage_container'], default='vhds'),
storage_blob_name=dict(type='str', aliases=['storage_blob']),
os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite'],
default='ReadOnly'),
os_disk_size_gb=dict(type='int'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'StandardSSD_LRS', 'Premium_LRS']),
os_disk_name=dict(type='str'),
os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'),
public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static', 'Disabled'], default='Static',
aliases=['public_ip_allocation']),
open_ports=dict(type='list'),
network_interface_names=dict(type='list', aliases=['network_interfaces'], elements='raw'),
remove_on_absent=dict(type='list', default=['all']),
virtual_network_resource_group=dict(type='str'),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
subnet_name=dict(type='str', aliases=['subnet']),
allocated=dict(type='bool', default=True),
restarted=dict(type='bool', default=False),
started=dict(type='bool', default=True),
generalized=dict(type='bool', default=False),
data_disks=dict(type='list'),
plan=dict(type='dict'),
accept_terms=dict(type='bool', default=False),
zones=dict(type='list')
)
self.resource_group = None
self.name = None
self.custom_data = None
self.state = None
self.location = None
self.short_hostname = None
self.vm_size = None
self.admin_username = None
self.admin_password = None
self.ssh_password_enabled = None
self.ssh_public_keys = None
self.image = None
self.availability_set = None
self.storage_account_name = None
self.storage_container_name = None
self.storage_blob_name = None
self.os_type = None
self.os_disk_caching = None
self.os_disk_size_gb = None
self.managed_disk_type = None
self.os_disk_name = None
self.network_interface_names = None
self.remove_on_absent = set()
self.tags = None
self.force = None
self.public_ip_allocation_method = None
self.open_ports = None
self.virtual_network_resource_group = None
self.virtual_network_name = None
self.subnet_name = None
self.allocated = None
self.restarted = None
self.started = None
self.generalized = None
self.differences = None
self.data_disks = None
self.plan = None
self.accept_terms = None
self.zones = None
self.results = dict(
changed=False,
actions=[],
powerstate_change=None,
ansible_facts=dict(azure_vm=None)
)
super(AzureRMVirtualMachine, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
# make sure options are lower case
self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent])
# convert elements to ints
self.zones = [int(i) for i in self.zones] if self.zones else None
changed = False
powerstate_change = None
results = dict()
vm = None
network_interfaces = []
requested_vhd_uri = None
data_disk_requested_vhd_uri = None
disable_ssh_password = None
vm_dict = None
image_reference = None
custom_image = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
self.location = normalize_location_name(self.location)
if self.state == 'present':
# Verify parameters and resolve any defaults
if self.vm_size and not self.vm_size_is_valid():
self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format(
self.vm_size
))
if self.network_interface_names:
for nic_name in self.network_interface_names:
nic = self.parse_network_interface(nic_name)
network_interfaces.append(nic)
if self.ssh_public_keys:
msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \
"each dict contains keys: path, key_data."
for key in self.ssh_public_keys:
if not isinstance(key, dict):
self.fail(msg)
if not key.get('path') or not key.get('key_data'):
self.fail(msg)
if self.image and isinstance(self.image, dict):
if all(key in self.image for key in ('publisher', 'offer', 'sku', 'version')):
marketplace_image = self.get_marketplace_image_version()
if self.image['version'] == 'latest':
self.image['version'] = marketplace_image.name
self.log("Using image version {0}".format(self.image['version']))
image_reference = self.compute_models.ImageReference(
publisher=self.image['publisher'],
offer=self.image['offer'],
sku=self.image['sku'],
version=self.image['version']
)
elif self.image.get('name'):
custom_image = True
image_reference = self.get_custom_image_reference(
self.image.get('name'),
self.image.get('resource_group'))
else:
self.fail("parameter error: expecting image to contain [publisher, offer, sku, version] or [name, resource_group]")
elif self.image and isinstance(self.image, str):
custom_image = True
image_reference = self.get_custom_image_reference(self.image)
elif self.image:
self.fail("parameter error: expecting image to be a string or dict not {0}".format(type(self.image).__name__))
if self.plan:
if not self.plan.get('name') or not self.plan.get('product') or not self.plan.get('publisher'):
self.fail("parameter error: plan must include name, product, and publisher")
if not self.storage_blob_name and not self.managed_disk_type:
self.storage_blob_name = self.name + '.vhd'
elif self.managed_disk_type:
self.storage_blob_name = self.name
if self.storage_account_name and not self.managed_disk_type:
properties = self.get_storage_account(self.storage_account_name)
requested_vhd_uri = '{0}{1}/{2}'.format(properties.primary_endpoints.blob,
self.storage_container_name,
self.storage_blob_name)
disable_ssh_password = not self.ssh_password_enabled
try:
self.log("Fetching virtual machine {0}".format(self.name))
vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview')
self.check_provisioning_state(vm, self.state)
vm_dict = self.serialize_vm(vm)
if self.state == 'present':
differences = []
current_nics = []
results = vm_dict
# Try to determine if the VM needs to be updated
if self.network_interface_names:
for nic in vm_dict['properties']['networkProfile']['networkInterfaces']:
current_nics.append(nic['id'])
if set(current_nics) != set(network_interfaces):
self.log('CHANGED: virtual machine {0} - network interfaces are different.'.format(self.name))
differences.append('Network Interfaces')
                        updated_nics = [dict(id=id, primary=(i == 0))
for i, id in enumerate(network_interfaces)]
vm_dict['properties']['networkProfile']['networkInterfaces'] = updated_nics
changed = True
if self.os_disk_caching and \
self.os_disk_caching != vm_dict['properties']['storageProfile']['osDisk']['caching']:
self.log('CHANGED: virtual machine {0} - OS disk caching'.format(self.name))
differences.append('OS Disk caching')
changed = True
vm_dict['properties']['storageProfile']['osDisk']['caching'] = self.os_disk_caching
if self.os_disk_name and \
self.os_disk_name != vm_dict['properties']['storageProfile']['osDisk']['name']:
self.log('CHANGED: virtual machine {0} - OS disk name'.format(self.name))
differences.append('OS Disk name')
changed = True
vm_dict['properties']['storageProfile']['osDisk']['name'] = self.os_disk_name
if self.os_disk_size_gb and \
self.os_disk_size_gb != vm_dict['properties']['storageProfile']['osDisk'].get('diskSizeGB'):
self.log('CHANGED: virtual machine {0} - OS disk size '.format(self.name))
differences.append('OS Disk size')
changed = True
vm_dict['properties']['storageProfile']['osDisk']['diskSizeGB'] = self.os_disk_size_gb
if self.vm_size and \
self.vm_size != vm_dict['properties']['hardwareProfile']['vmSize']:
self.log('CHANGED: virtual machine {0} - size '.format(self.name))
differences.append('VM size')
changed = True
vm_dict['properties']['hardwareProfile']['vmSize'] = self.vm_size
update_tags, vm_dict['tags'] = self.update_tags(vm_dict.get('tags', dict()))
if update_tags:
differences.append('Tags')
changed = True
if self.short_hostname and self.short_hostname != vm_dict['properties']['osProfile']['computerName']:
self.log('CHANGED: virtual machine {0} - short hostname'.format(self.name))
differences.append('Short Hostname')
changed = True
vm_dict['properties']['osProfile']['computerName'] = self.short_hostname
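                # Map the requested started/allocated/restarted/generalized flags
                # onto a single powerstate_change action, applied after any
                # create/update below.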
if self.started and vm_dict['powerstate'] not in ['starting', 'running'] and self.allocated:
self.log("CHANGED: virtual machine {0} not running and requested state 'running'".format(self.name))
changed = True
powerstate_change = 'poweron'
elif self.state == 'present' and vm_dict['powerstate'] == 'running' and self.restarted:
self.log("CHANGED: virtual machine {0} {1} and requested state 'restarted'"
.format(self.name, vm_dict['powerstate']))
changed = True
powerstate_change = 'restarted'
elif self.state == 'present' and not self.allocated and vm_dict['powerstate'] not in ['deallocated', 'deallocating']:
self.log("CHANGED: virtual machine {0} {1} and requested state 'deallocated'"
.format(self.name, vm_dict['powerstate']))
changed = True
powerstate_change = 'deallocated'
elif not self.started and vm_dict['powerstate'] == 'running':
self.log("CHANGED: virtual machine {0} running and requested state 'stopped'".format(self.name))
changed = True
powerstate_change = 'poweroff'
elif self.generalized and vm_dict['powerstate'] != 'generalized':
self.log("CHANGED: virtual machine {0} requested to be 'generalized'".format(self.name))
changed = True
powerstate_change = 'generalized'
vm_dict['zones'] = [int(i) for i in vm_dict['zones']] if 'zones' in vm_dict and vm_dict['zones'] else None
if self.zones != vm_dict['zones']:
self.log("CHANGED: virtual machine {0} zones".format(self.name))
differences.append('Zones')
changed = True
self.differences = differences
elif self.state == 'absent':
self.log("CHANGED: virtual machine {0} exists and requested state is 'absent'".format(self.name))
results = dict()
changed = True
except CloudError:
self.log('Virtual machine {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: virtual machine {0} does not exist but state is 'present'.".format(self.name))
changed = True
self.results['changed'] = changed
self.results['ansible_facts']['azure_vm'] = results
self.results['powerstate_change'] = powerstate_change
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
default_storage_account = None
if not vm:
# Create the VM
self.log("Create virtual machine {0}".format(self.name))
self.results['actions'].append('Created VM {0}'.format(self.name))
# Validate parameters
if not self.admin_username:
self.fail("Parameter error: admin_username required when creating a virtual machine.")
if self.os_type == 'Linux':
if disable_ssh_password and not self.ssh_public_keys:
self.fail("Parameter error: ssh_public_keys required when disabling SSH password.")
if not image_reference:
self.fail("Parameter error: an image is required when creating a virtual machine.")
availability_set_resource = None
if self.availability_set:
parsed_availability_set = parse_resource_id(self.availability_set)
availability_set = self.get_availability_set(parsed_availability_set.get('resource_group', self.resource_group),
parsed_availability_set.get('name'))
availability_set_resource = self.compute_models.SubResource(id=availability_set.id)
if self.zones:
self.fail("Parameter error: you can't use Availability Set and Availability Zones at the same time")
# Get defaults
if not self.network_interface_names:
default_nic = self.create_default_nic()
self.log("network interface:")
self.log(self.serialize_obj(default_nic, 'NetworkInterface'), pretty_print=True)
network_interfaces = [default_nic.id]
# os disk
if not self.storage_account_name and not self.managed_disk_type:
storage_account = self.create_default_storage_account()
self.log("storage account:")
self.log(self.serialize_obj(storage_account, 'StorageAccount'), pretty_print=True)
requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
storage_account.name,
self._cloud_environment.suffixes.storage_endpoint,
self.storage_container_name,
self.storage_blob_name)
default_storage_account = storage_account # store for use by data disks if necessary
if not self.short_hostname:
self.short_hostname = self.name
                    nics = [self.compute_models.NetworkInterfaceReference(id=id, primary=(i == 0))
for i, id in enumerate(network_interfaces)]
# os disk
if self.managed_disk_type:
vhd = None
managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=self.managed_disk_type)
elif custom_image:
vhd = None
managed_disk = None
else:
vhd = self.compute_models.VirtualHardDisk(uri=requested_vhd_uri)
managed_disk = None
plan = None
if self.plan:
plan = self.compute_models.Plan(name=self.plan.get('name'), product=self.plan.get('product'),
publisher=self.plan.get('publisher'),
promotion_code=self.plan.get('promotion_code'))
vm_resource = self.compute_models.VirtualMachine(
location=self.location,
tags=self.tags,
os_profile=self.compute_models.OSProfile(
admin_username=self.admin_username,
computer_name=self.short_hostname,
),
hardware_profile=self.compute_models.HardwareProfile(
vm_size=self.vm_size
),
storage_profile=self.compute_models.StorageProfile(
os_disk=self.compute_models.OSDisk(
name=self.os_disk_name if self.os_disk_name else self.storage_blob_name,
vhd=vhd,
managed_disk=managed_disk,
create_option=self.compute_models.DiskCreateOptionTypes.from_image,
caching=self.os_disk_caching,
disk_size_gb=self.os_disk_size_gb
),
image_reference=image_reference,
),
network_profile=self.compute_models.NetworkProfile(
network_interfaces=nics
),
availability_set=availability_set_resource,
plan=plan,
zones=self.zones,
)
if self.admin_password:
vm_resource.os_profile.admin_password = self.admin_password
if self.custom_data:
# Azure SDK (erroneously?) wants native string type for this
vm_resource.os_profile.custom_data = to_native(base64.b64encode(to_bytes(self.custom_data)))
if self.os_type == 'Linux':
vm_resource.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
disable_password_authentication=disable_ssh_password
)
if self.ssh_public_keys:
ssh_config = self.compute_models.SshConfiguration()
ssh_config.public_keys = \
[self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys]
vm_resource.os_profile.linux_configuration.ssh = ssh_config
# data disk
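                    # Unmanaged data disks get auto-generated blob names
                    # ('<vm name>-data-N.vhd') and fall back to an auto-created
                    # storage account; managed ones are named '<vm name>-datadisk-N'
                    # and use the requested managed_disk_type.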
if self.data_disks:
data_disks = []
count = 0
for data_disk in self.data_disks:
if not data_disk.get('managed_disk_type'):
if not data_disk.get('storage_blob_name'):
data_disk['storage_blob_name'] = self.name + '-data-' + str(count) + '.vhd'
count += 1
if data_disk.get('storage_account_name'):
data_disk_storage_account = self.get_storage_account(data_disk['storage_account_name'])
else:
                                    if not default_storage_account:
data_disk_storage_account = self.create_default_storage_account()
self.log("data disk storage account:")
self.log(self.serialize_obj(data_disk_storage_account, 'StorageAccount'), pretty_print=True)
default_storage_account = data_disk_storage_account # store for use by future data disks if necessary
else:
data_disk_storage_account = default_storage_account
if not data_disk.get('storage_container_name'):
data_disk['storage_container_name'] = 'vhds'
data_disk_requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
data_disk_storage_account.name,
self._cloud_environment.suffixes.storage_endpoint,
data_disk['storage_container_name'],
data_disk['storage_blob_name']
)
if not data_disk.get('managed_disk_type'):
data_disk_managed_disk = None
disk_name = data_disk['storage_blob_name']
data_disk_vhd = self.compute_models.VirtualHardDisk(uri=data_disk_requested_vhd_uri)
else:
data_disk_vhd = None
data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=data_disk['managed_disk_type'])
disk_name = self.name + "-datadisk-" + str(count)
count += 1
data_disk['caching'] = data_disk.get(
'caching', 'ReadOnly'
)
data_disks.append(self.compute_models.DataDisk(
lun=data_disk['lun'],
name=disk_name,
vhd=data_disk_vhd,
caching=data_disk['caching'],
create_option=self.compute_models.DiskCreateOptionTypes.empty,
disk_size_gb=data_disk['disk_size_gb'],
managed_disk=data_disk_managed_disk,
))
vm_resource.storage_profile.data_disks = data_disks
# Before creating VM accept terms of plan if `accept_terms` is True
if self.accept_terms is True:
if not all([self.plan.get('name'), self.plan.get('product'), self.plan.get('publisher')]):
self.fail("parameter error: plan must be specified and include name, product, and publisher")
try:
plan_name = self.plan.get('name')
plan_product = self.plan.get('product')
plan_publisher = self.plan.get('publisher')
term = self.marketplace_client.marketplace_agreements.get(
publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name)
term.accepted = True
agreement = self.marketplace_client.marketplace_agreements.create(
publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name, parameters=term)
except Exception as exc:
self.fail(("Error accepting terms for virtual machine {0} with plan {1}. " +
"Only service admin/account admin users can purchase images " +
"from the marketplace. - {2}").format(self.name, self.plan, str(exc)))
self.log("Create virtual machine with parameters:")
self.create_or_update_vm(vm_resource, 'all_autocreated' in self.remove_on_absent)
elif self.differences and len(self.differences) > 0:
# Update the VM based on detected config differences
self.log("Update virtual machine {0}".format(self.name))
self.results['actions'].append('Updated VM {0}'.format(self.name))
                    nics = [self.compute_models.NetworkInterfaceReference(id=interface['id'], primary=(i == 0))
for i, interface in enumerate(vm_dict['properties']['networkProfile']['networkInterfaces'])]
# os disk
if not vm_dict['properties']['storageProfile']['osDisk'].get('managedDisk'):
managed_disk = None
vhd = self.compute_models.VirtualHardDisk(uri=vm_dict['properties']['storageProfile']['osDisk'].get('vhd', {}).get('uri'))
else:
vhd = None
managed_disk = self.compute_models.ManagedDiskParameters(
storage_account_type=vm_dict['properties']['storageProfile']['osDisk']['managedDisk'].get('storageAccountType')
)
availability_set_resource = None
try:
availability_set_resource = self.compute_models.SubResource(id=vm_dict['properties']['availabilitySet'].get('id'))
except Exception:
# pass if the availability set is not set
pass
if 'imageReference' in vm_dict['properties']['storageProfile'].keys():
if 'id' in vm_dict['properties']['storageProfile']['imageReference'].keys():
image_reference = self.compute_models.ImageReference(
id=vm_dict['properties']['storageProfile']['imageReference']['id']
)
else:
image_reference = self.compute_models.ImageReference(
publisher=vm_dict['properties']['storageProfile']['imageReference'].get('publisher'),
offer=vm_dict['properties']['storageProfile']['imageReference'].get('offer'),
sku=vm_dict['properties']['storageProfile']['imageReference'].get('sku'),
version=vm_dict['properties']['storageProfile']['imageReference'].get('version')
)
else:
image_reference = None
# You can't change a vm zone
if vm_dict['zones'] != self.zones:
self.fail("You can't change the Availability Zone of a virtual machine (have: {0}, want: {1})".format(vm_dict['zones'], self.zones))
if 'osProfile' in vm_dict['properties']:
os_profile = self.compute_models.OSProfile(
admin_username=vm_dict['properties'].get('osProfile', {}).get('adminUsername'),
computer_name=vm_dict['properties'].get('osProfile', {}).get('computerName')
)
else:
os_profile = None
vm_resource = self.compute_models.VirtualMachine(
location=vm_dict['location'],
os_profile=os_profile,
hardware_profile=self.compute_models.HardwareProfile(
vm_size=vm_dict['properties']['hardwareProfile'].get('vmSize')
),
storage_profile=self.compute_models.StorageProfile(
os_disk=self.compute_models.OSDisk(
name=vm_dict['properties']['storageProfile']['osDisk'].get('name'),
vhd=vhd,
managed_disk=managed_disk,
create_option=vm_dict['properties']['storageProfile']['osDisk'].get('createOption'),
os_type=vm_dict['properties']['storageProfile']['osDisk'].get('osType'),
caching=vm_dict['properties']['storageProfile']['osDisk'].get('caching'),
disk_size_gb=vm_dict['properties']['storageProfile']['osDisk'].get('diskSizeGB')
),
image_reference=image_reference
),
availability_set=availability_set_resource,
network_profile=self.compute_models.NetworkProfile(
network_interfaces=nics
),
)
if vm_dict.get('tags'):
vm_resource.tags = vm_dict['tags']
# Add custom_data, if provided
if vm_dict['properties'].get('osProfile', {}).get('customData'):
custom_data = vm_dict['properties']['osProfile']['customData']
# Azure SDK (erroneously?) wants native string type for this
vm_resource.os_profile.custom_data = to_native(base64.b64encode(to_bytes(custom_data)))
# Add admin password, if one provided
if vm_dict['properties'].get('osProfile', {}).get('adminPassword'):
vm_resource.os_profile.admin_password = vm_dict['properties']['osProfile']['adminPassword']
# Add linux configuration, if applicable
linux_config = vm_dict['properties'].get('osProfile', {}).get('linuxConfiguration')
if linux_config:
ssh_config = linux_config.get('ssh', None)
vm_resource.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
disable_password_authentication=linux_config.get('disablePasswordAuthentication', False)
)
if ssh_config:
public_keys = ssh_config.get('publicKeys')
if public_keys:
vm_resource.os_profile.linux_configuration.ssh = self.compute_models.SshConfiguration(public_keys=[])
for key in public_keys:
vm_resource.os_profile.linux_configuration.ssh.public_keys.append(
self.compute_models.SshPublicKey(path=key['path'], key_data=key['keyData'])
)
# data disk
if vm_dict['properties']['storageProfile'].get('dataDisks'):
data_disks = []
for data_disk in vm_dict['properties']['storageProfile']['dataDisks']:
if data_disk.get('managedDisk'):
managed_disk_type = data_disk['managedDisk'].get('storageAccountType')
data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=managed_disk_type)
data_disk_vhd = None
else:
data_disk_vhd = data_disk['vhd']['uri']
data_disk_managed_disk = None
data_disks.append(self.compute_models.DataDisk(
lun=int(data_disk['lun']),
name=data_disk.get('name'),
vhd=data_disk_vhd,
caching=data_disk.get('caching'),
create_option=data_disk.get('createOption'),
disk_size_gb=int(data_disk['diskSizeGB']),
managed_disk=data_disk_managed_disk,
))
vm_resource.storage_profile.data_disks = data_disks
self.log("Update virtual machine with parameters:")
self.create_or_update_vm(vm_resource, False)
                # Make sure we leave the machine in the requested power state
if (powerstate_change == 'poweron' and
self.results['ansible_facts']['azure_vm']['powerstate'] != 'running'):
# Attempt to power on the machine
self.power_on_vm()
elif (powerstate_change == 'poweroff' and
self.results['ansible_facts']['azure_vm']['powerstate'] == 'running'):
# Attempt to power off the machine
self.power_off_vm()
elif powerstate_change == 'restarted':
self.restart_vm()
elif powerstate_change == 'deallocated':
self.deallocate_vm()
elif powerstate_change == 'generalized':
self.power_off_vm()
self.generalize_vm()
self.results['ansible_facts']['azure_vm'] = self.serialize_vm(self.get_vm())
elif self.state == 'absent':
# delete the VM
self.log("Delete virtual machine {0}".format(self.name))
self.results['ansible_facts']['azure_vm'] = None
self.delete_vm(vm)
# until we sort out how we want to do this globally
del self.results['actions']
return self.results
def get_vm(self):
'''
Get the VM with expanded instanceView
:return: VirtualMachine object
'''
try:
vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview')
return vm
except Exception as exc:
self.fail("Error getting virtual machine {0} - {1}".format(self.name, str(exc)))
def serialize_vm(self, vm):
'''
Convert a VirtualMachine object to dict.
:param vm: VirtualMachine object
:return: dict
'''
result = self.serialize_obj(vm, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
result['id'] = vm.id
result['name'] = vm.name
result['type'] = vm.type
result['location'] = vm.location
result['tags'] = vm.tags
result['powerstate'] = dict()
if vm.instance_view:
result['powerstate'] = next((s.code.replace('PowerState/', '')
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
for s in vm.instance_view.statuses:
if s.code.lower() == "osstate/generalized":
result['powerstate'] = 'generalized'
# Expand network interfaces to include config properties
for interface in vm.network_profile.network_interfaces:
int_dict = azure_id_to_dict(interface.id)
nic = self.get_network_interface(int_dict['resourceGroups'], int_dict['networkInterfaces'])
for interface_dict in result['properties']['networkProfile']['networkInterfaces']:
if interface_dict['id'] == interface.id:
nic_dict = self.serialize_obj(nic, 'NetworkInterface')
interface_dict['name'] = int_dict['networkInterfaces']
interface_dict['properties'] = nic_dict['properties']
# Expand public IPs to include config properties
for interface in result['properties']['networkProfile']['networkInterfaces']:
for config in interface['properties']['ipConfigurations']:
if config['properties'].get('publicIPAddress'):
pipid_dict = azure_id_to_dict(config['properties']['publicIPAddress']['id'])
try:
pip = self.network_client.public_ip_addresses.get(pipid_dict['resourceGroups'],
pipid_dict['publicIPAddresses'])
except Exception as exc:
self.fail("Error fetching public ip {0} - {1}".format(pipid_dict['publicIPAddresses'],
str(exc)))
pip_dict = self.serialize_obj(pip, 'PublicIPAddress')
config['properties']['publicIPAddress']['name'] = pipid_dict['publicIPAddresses']
config['properties']['publicIPAddress']['properties'] = pip_dict['properties']
self.log(result, pretty_print=True)
if self.state != 'absent' and not result['powerstate']:
self.fail("Failed to determine PowerState of virtual machine {0}".format(self.name))
return result
def power_off_vm(self):
self.log("Powered off virtual machine {0}".format(self.name))
self.results['actions'].append("Powered off virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.power_off(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error powering off virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def power_on_vm(self):
self.results['actions'].append("Powered on virtual machine {0}".format(self.name))
self.log("Power on virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.start(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error powering on virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def restart_vm(self):
self.results['actions'].append("Restarted virtual machine {0}".format(self.name))
self.log("Restart virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.restart(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error restarting virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def deallocate_vm(self):
self.results['actions'].append("Deallocated virtual machine {0}".format(self.name))
self.log("Deallocate virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.deallocate(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deallocating virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def generalize_vm(self):
self.results['actions'].append("Generalize virtual machine {0}".format(self.name))
self.log("Generalize virtual machine {0}".format(self.name))
try:
response = self.compute_client.virtual_machines.generalize(self.resource_group, self.name)
if isinstance(response, LROPoller):
self.get_poller_result(response)
except Exception as exc:
self.fail("Error generalizing virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def remove_autocreated_resources(self, tags):
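        # Implicitly created resources are tracked through the VM tags
        # _own_sa_ / _own_nic_ / _own_pip_ / _own_nsg_ (set by the
        # create_default_* helpers); delete whichever of them are present.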
if tags:
sa_name = tags.get('_own_sa_')
nic_name = tags.get('_own_nic_')
pip_name = tags.get('_own_pip_')
nsg_name = tags.get('_own_nsg_')
if sa_name:
self.delete_storage_account(self.resource_group, sa_name)
if nic_name:
self.delete_nic(self.resource_group, nic_name)
if pip_name:
self.delete_pip(self.resource_group, pip_name)
if nsg_name:
self.delete_nsg(self.resource_group, nsg_name)
def delete_vm(self, vm):
vhd_uris = []
managed_disk_ids = []
nic_names = []
pip_names = []
if 'all_autocreated' not in self.remove_on_absent:
if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
# store the attached vhd info so we can nuke it after the VM is gone
                if vm.storage_profile.os_disk.managed_disk:
self.log('Storing managed disk ID for deletion')
managed_disk_ids.append(vm.storage_profile.os_disk.managed_disk.id)
                elif vm.storage_profile.os_disk.vhd:
self.log('Storing VHD URI for deletion')
vhd_uris.append(vm.storage_profile.os_disk.vhd.uri)
data_disks = vm.storage_profile.data_disks
for data_disk in data_disks:
if data_disk is not None:
                        if data_disk.vhd:
vhd_uris.append(data_disk.vhd.uri)
                        elif data_disk.managed_disk:
managed_disk_ids.append(data_disk.managed_disk.id)
# FUTURE enable diff mode, move these there...
self.log("VHD URIs to delete: {0}".format(', '.join(vhd_uris)))
self.results['deleted_vhd_uris'] = vhd_uris
self.log("Managed disk IDs to delete: {0}".format(', '.join(managed_disk_ids)))
self.results['deleted_managed_disk_ids'] = managed_disk_ids
if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
# store the attached nic info so we can nuke them after the VM is gone
self.log('Storing NIC names for deletion.')
for interface in vm.network_profile.network_interfaces:
id_dict = azure_id_to_dict(interface.id)
nic_names.append(dict(name=id_dict['networkInterfaces'], resource_group=id_dict['resourceGroups']))
self.log('NIC names to delete {0}'.format(str(nic_names)))
self.results['deleted_network_interfaces'] = nic_names
if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
# also store each nic's attached public IPs and delete after the NIC is gone
for nic_dict in nic_names:
nic = self.get_network_interface(nic_dict['resource_group'], nic_dict['name'])
for ipc in nic.ip_configurations:
if ipc.public_ip_address:
pip_dict = azure_id_to_dict(ipc.public_ip_address.id)
pip_names.append(dict(name=pip_dict['publicIPAddresses'], resource_group=pip_dict['resourceGroups']))
self.log('Public IPs to delete are {0}'.format(str(pip_names)))
self.results['deleted_public_ips'] = pip_names
self.log("Deleting virtual machine {0}".format(self.name))
self.results['actions'].append("Deleted virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.delete(self.resource_group, self.name)
# wait for the poller to finish
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting virtual machine {0} - {1}".format(self.name, str(exc)))
if 'all_autocreated' in self.remove_on_absent:
self.remove_autocreated_resources(vm.tags)
else:
# TODO: parallelize nic, vhd, and public ip deletions with begin_deleting
# TODO: best-effort to keep deleting other linked resources if we encounter an error
if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
self.log('Deleting VHDs')
self.delete_vm_storage(vhd_uris)
self.log('Deleting managed disks')
self.delete_managed_disks(managed_disk_ids)
if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
self.log('Deleting network interfaces')
for nic_dict in nic_names:
self.delete_nic(nic_dict['resource_group'], nic_dict['name'])
if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
self.log('Deleting public IPs')
for pip_dict in pip_names:
self.delete_pip(pip_dict['resource_group'], pip_dict['name'])
return True
def get_network_interface(self, resource_group, name):
try:
nic = self.network_client.network_interfaces.get(resource_group, name)
return nic
except Exception as exc:
self.fail("Error fetching network interface {0} - {1}".format(name, str(exc)))
return True
def delete_nic(self, resource_group, name):
self.log("Deleting network interface {0}".format(name))
self.results['actions'].append("Deleted network interface {0}".format(name))
try:
poller = self.network_client.network_interfaces.delete(resource_group, name)
except Exception as exc:
self.fail("Error deleting network interface {0} - {1}".format(name, str(exc)))
self.get_poller_result(poller)
# Delete doesn't return anything. If we get this far, assume success
return True
def delete_pip(self, resource_group, name):
self.results['actions'].append("Deleted public IP {0}".format(name))
try:
poller = self.network_client.public_ip_addresses.delete(resource_group, name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting {0} - {1}".format(name, str(exc)))
# Delete returns nada. If we get here, assume that all is well.
return True
def delete_nsg(self, resource_group, name):
self.results['actions'].append("Deleted NSG {0}".format(name))
try:
poller = self.network_client.network_security_groups.delete(resource_group, name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting {0} - {1}".format(name, str(exc)))
return True
def delete_managed_disks(self, managed_disk_ids):
for mdi in managed_disk_ids:
try:
poller = self.rm_client.resources.delete_by_id(mdi, '2017-03-30')
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting managed disk {0} - {1}".format(mdi, str(exc)))
return True
def delete_storage_account(self, resource_group, name):
self.log("Delete storage account {0}".format(name))
self.results['actions'].append("Deleted storage account {0}".format(name))
try:
self.storage_client.storage_accounts.delete(self.resource_group, name)
except Exception as exc:
self.fail("Error deleting storage account {0} - {2}".format(name, str(exc)))
return True
def delete_vm_storage(self, vhd_uris):
        # FUTURE: figure out a cloud_env independent way to delete these
for uri in vhd_uris:
self.log("Extracting info from blob uri '{0}'".format(uri))
try:
blob_parts = extract_names_from_blob_uri(uri, self._cloud_environment.suffixes.storage_endpoint)
except Exception as exc:
self.fail("Error parsing blob URI {0}".format(str(exc)))
storage_account_name = blob_parts['accountname']
container_name = blob_parts['containername']
blob_name = blob_parts['blobname']
blob_client = self.get_blob_client(self.resource_group, storage_account_name)
self.log("Delete blob {0}:{1}".format(container_name, blob_name))
self.results['actions'].append("Deleted blob {0}:{1}".format(container_name, blob_name))
try:
blob_client.delete_blob(container_name, blob_name)
except Exception as exc:
self.fail("Error deleting blob {0}:{1} - {2}".format(container_name, blob_name, str(exc)))
return True
def get_marketplace_image_version(self):
try:
versions = self.compute_client.virtual_machine_images.list(self.location,
self.image['publisher'],
self.image['offer'],
self.image['sku'])
except Exception as exc:
self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
str(exc)))
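        # 'latest' resolves to the last element of the list returned by the
        # service; any other value must match a version name exactly.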
if versions and len(versions) > 0:
if self.image['version'] == 'latest':
return versions[len(versions) - 1]
for version in versions:
if version.name == self.image['version']:
return version
self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
self.image['version']))
return None
def get_custom_image_reference(self, name, resource_group=None):
try:
if resource_group:
vm_images = self.compute_client.images.list_by_resource_group(resource_group)
else:
vm_images = self.compute_client.images.list()
except Exception as exc:
self.fail("Error fetching custom images from subscription - {0}".format(str(exc)))
for vm_image in vm_images:
if vm_image.name == name:
self.log("Using custom image id {0}".format(vm_image.id))
return self.compute_models.ImageReference(id=vm_image.id)
self.fail("Error could not find image with name {0}".format(name))
return None
def get_availability_set(self, resource_group, name):
try:
return self.compute_client.availability_sets.get(resource_group, name)
except Exception as exc:
self.fail("Error fetching availability set {0} - {1}".format(name, str(exc)))
def get_storage_account(self, name):
try:
account = self.storage_client.storage_accounts.get_properties(self.resource_group,
name)
return account
except Exception as exc:
self.fail("Error fetching storage account {0} - {1}".format(name, str(exc)))
def create_or_update_vm(self, params, remove_autocreated_on_failure):
try:
poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, self.name, params)
self.get_poller_result(poller)
except Exception as exc:
if remove_autocreated_on_failure:
self.remove_autocreated_resources(params.tags)
self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc)))
def vm_size_is_valid(self):
'''
Validate self.vm_size against the list of virtual machine sizes available for the account and location.
:return: boolean
'''
try:
sizes = self.compute_client.virtual_machine_sizes.list(self.location)
except Exception as exc:
self.fail("Error retrieving available machine sizes - {0}".format(str(exc)))
for size in sizes:
if size.name == self.vm_size:
return True
return False
def create_default_storage_account(self):
'''
Create a default storage account <vm name>XXXX, where XXXX is a random number. If <vm name>XXXX exists, use it.
Otherwise, create one.
:return: storage account object
'''
account = None
valid_name = False
if self.tags is None:
self.tags = {}
# Attempt to find a valid storage account name
storage_account_name_base = re.sub('[^a-zA-Z0-9]', '', self.name[:20].lower())
for i in range(0, 5):
rand = random.randrange(1000, 9999)
storage_account_name = storage_account_name_base + str(rand)
if self.check_storage_account_name(storage_account_name):
valid_name = True
break
if not valid_name:
self.fail("Failed to create a unique storage account name for {0}. Try using a different VM name."
.format(self.name))
try:
account = self.storage_client.storage_accounts.get_properties(self.resource_group, storage_account_name)
except CloudError:
pass
if account:
self.log("Storage account {0} found.".format(storage_account_name))
self.check_provisioning_state(account)
return account
sku = self.storage_models.Sku(self.storage_models.SkuName.standard_lrs)
sku.tier = self.storage_models.SkuTier.standard
kind = self.storage_models.Kind.storage
parameters = self.storage_models.StorageAccountCreateParameters(sku, kind, self.location)
self.log("Creating storage account {0} in location {1}".format(storage_account_name, self.location))
self.results['actions'].append("Created storage account {0}".format(storage_account_name))
try:
poller = self.storage_client.storage_accounts.create(self.resource_group, storage_account_name, parameters)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Failed to create storage account: {0} - {1}".format(storage_account_name, str(exc)))
self.tags['_own_sa_'] = storage_account_name
return self.get_storage_account(storage_account_name)
def check_storage_account_name(self, name):
self.log("Checking storage account name availability for {0}".format(name))
try:
response = self.storage_client.storage_accounts.check_name_availability(name)
if response.reason == 'AccountNameInvalid':
raise Exception("Invalid default storage account name: {0}".format(name))
except Exception as exc:
self.fail("Error checking storage account name availability for {0} - {1}".format(name, str(exc)))
return response.name_available
def create_default_nic(self):
'''
Create a default Network Interface <vm name>01. Requires an existing virtual network
with one subnet. If NIC <vm name>01 exists, use it. Otherwise, create one.
:return: NIC object
'''
network_interface_name = self.name + '01'
nic = None
if self.tags is None:
self.tags = {}
self.log("Create default NIC {0}".format(network_interface_name))
self.log("Check to see if NIC {0} exists".format(network_interface_name))
try:
nic = self.network_client.network_interfaces.get(self.resource_group, network_interface_name)
except CloudError:
pass
if nic:
self.log("NIC {0} found.".format(network_interface_name))
self.check_provisioning_state(nic)
return nic
self.log("NIC {0} does not exist.".format(network_interface_name))
virtual_network_resource_group = None
if self.virtual_network_resource_group:
virtual_network_resource_group = self.virtual_network_resource_group
else:
virtual_network_resource_group = self.resource_group
if self.virtual_network_name:
try:
self.network_client.virtual_networks.list(virtual_network_resource_group, self.virtual_network_name)
virtual_network_name = self.virtual_network_name
except CloudError as exc:
self.fail("Error: fetching virtual network {0} - {1}".format(self.virtual_network_name, str(exc)))
else:
# Find a virtual network
no_vnets_msg = "Error: unable to find virtual network in resource group {0}. A virtual network " \
"with at least one subnet must exist in order to create a NIC for the virtual " \
"machine.".format(virtual_network_resource_group)
virtual_network_name = None
try:
vnets = self.network_client.virtual_networks.list(virtual_network_resource_group)
except CloudError:
self.log('cloud error!')
self.fail(no_vnets_msg)
for vnet in vnets:
virtual_network_name = vnet.name
self.log('vnet name: {0}'.format(vnet.name))
break
if not virtual_network_name:
self.fail(no_vnets_msg)
if self.subnet_name:
try:
subnet = self.network_client.subnets.get(virtual_network_resource_group, virtual_network_name, self.subnet_name)
subnet_id = subnet.id
except Exception as exc:
self.fail("Error: fetching subnet {0} - {1}".format(self.subnet_name, str(exc)))
else:
no_subnets_msg = "Error: unable to find a subnet in virtual network {0}. A virtual network " \
"with at least one subnet must exist in order to create a NIC for the virtual " \
"machine.".format(virtual_network_name)
subnet_id = None
try:
subnets = self.network_client.subnets.list(virtual_network_resource_group, virtual_network_name)
except CloudError:
self.fail(no_subnets_msg)
for subnet in subnets:
subnet_id = subnet.id
self.log('subnet id: {0}'.format(subnet_id))
break
if not subnet_id:
self.fail(no_subnets_msg)
pip = None
if self.public_ip_allocation_method != 'Disabled':
self.results['actions'].append('Created default public IP {0}'.format(self.name + '01'))
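            # Zonal VMs get a Standard SKU public IP (the Basic SKU does not
            # support availability zones); otherwise the SKU is left at the
            # service default.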
sku = self.network_models.PublicIPAddressSku(name="Standard") if self.zones else None
pip_info = self.create_default_pip(self.resource_group, self.location, self.name + '01', self.public_ip_allocation_method, sku=sku)
pip = self.network_models.PublicIPAddress(id=pip_info.id, location=pip_info.location, resource_guid=pip_info.resource_guid, sku=sku)
self.tags['_own_pip_'] = self.name + '01'
self.results['actions'].append('Created default security group {0}'.format(self.name + '01'))
group = self.create_default_securitygroup(self.resource_group, self.location, self.name + '01', self.os_type,
self.open_ports)
self.tags['_own_nsg_'] = self.name + '01'
parameters = self.network_models.NetworkInterface(
location=self.location,
ip_configurations=[
self.network_models.NetworkInterfaceIPConfiguration(
private_ip_allocation_method='Dynamic',
)
]
)
parameters.ip_configurations[0].subnet = self.network_models.Subnet(id=subnet_id)
parameters.ip_configurations[0].name = 'default'
parameters.network_security_group = self.network_models.NetworkSecurityGroup(id=group.id,
location=group.location,
resource_guid=group.resource_guid)
parameters.ip_configurations[0].public_ip_address = pip
self.log("Creating NIC {0}".format(network_interface_name))
self.log(self.serialize_obj(parameters, 'NetworkInterface'), pretty_print=True)
self.results['actions'].append("Created NIC {0}".format(network_interface_name))
try:
poller = self.network_client.network_interfaces.create_or_update(self.resource_group,
network_interface_name,
parameters)
new_nic = self.get_poller_result(poller)
self.tags['_own_nic_'] = network_interface_name
except Exception as exc:
self.fail("Error creating network interface {0} - {1}".format(network_interface_name, str(exc)))
return new_nic
def parse_network_interface(self, nic):
nic = self.parse_resource_to_dict(nic)
if 'name' not in nic:
self.fail("Invalid network interface {0}".format(str(nic)))
return format_resource_id(val=nic['name'],
subscription_id=nic['subscription_id'],
resource_group=nic['resource_group'],
namespace='Microsoft.Network',
types='networkInterfaces')
def main():
AzureRMVirtualMachine()
if __name__ == '__main__':
main()
| gpl-3.0 | -8,855,567,878,221,242,000 | 46.33641 | 217 | 0.541406 | false |
DBrianKimmel/PyHouse | Project/src/Modules/Computer/Web/web_irrigation.py | 1 | 1855 | """
@name: Modules/Computer/Web/web_irrigation.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2016-2020 by D. Brian Kimmel
@note: Created on Aug 22, 2016
@license: MIT License
@summary:
"""
__updated__ = '2019-12-30'
# Import system type stuff
import os
# 'athena' and 'loaders' (LiveElement, xmlfile, expose) are used below but were
# not imported in the original file; they are assumed to come from nevow.
from nevow import athena, loaders
# Import PyMh files and modules.
from Modules.Core.data_objects import IrrigationData
from Modules.Computer.Web.web_utils import GetJSONHouseInfo
from Modules.Core.Utilities import json_tools
# Handy helper for finding external resources nearby.
webpath = os.path.join(os.path.split(__file__)[0])
templatepath = os.path.join(webpath, 'template')
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.WebIrrigation ')
class IrrigationElement(athena.LiveElement):
""" a 'live' internet element.
"""
docFactory = loaders.xmlfile(os.path.join(templatepath, 'irrigationElement.html'))
jsClass = u'irrigation.IrrigationWidget'
def __init__(self, p_workspace_obj, _p_params):
self.m_workspace_obj = p_workspace_obj
self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj
@athena.expose
def getHouseData(self):
l_computer = GetJSONHouseInfo(self.m_pyhouse_obj)
return l_computer
@athena.expose
def saveIrrigationData(self, p_json):
"""Internet data is returned, so update the computer info.
"""
l_json = json_tools.decode_json_unicode(p_json)
l_ix = int(l_json['Key'])
_l_system = l_json['Name']
try:
l_obj = self.m_pyhouse_obj.House.Irrigation[l_ix]
except KeyError:
l_obj = IrrigationData()
l_obj.DynDns = {}
l_obj.Name = l_json['Name']
l_obj.Key = 0
l_obj.Active = True
self.m_pyhouse_obj.House.Irrigation[l_ix] = l_obj
# ## END DBK
| mit | -6,909,008,324,557,919,000 | 28.919355 | 86 | 0.657682 | false |
rossengeorgiev/aprs-python | aprslib/parsing/mice.py | 1 | 6347 | import re
import math
from aprslib import base91
from aprslib.exceptions import ParseError
from aprslib.parsing.common import parse_dao
from aprslib.parsing.telemetry import parse_comment_telemetry
__all__ = [
'parse_mice',
]
# Mic-e message type table
MTYPE_TABLE_STD = {
"111": "M0: Off Duty",
"110": "M1: En Route",
"101": "M2: In Service",
"100": "M3: Returning",
"011": "M4: Committed",
"010": "M5: Special",
"001": "M6: Priority",
"000": "Emergency",
}
MTYPE_TABLE_CUSTOM = {
"111": "C0: Custom-0",
"110": "C1: Custom-1",
"101": "C2: Custom-2",
"100": "C3: Custom-3",
"011": "C4: Custom-4",
"010": "C5: Custom-5",
"001": "C6: Custom-6",
"000": "Emergency",
}
# Mic-encoded packet
#
# 'lllc/s$/......... Mic-E no message capability
# 'lllc/s$/>........ Mic-E message capability
# `lllc/s$/>........ Mic-E old posit
def parse_mice(dstcall, body):
parsed = {'format': 'mic-e'}
dstcall = dstcall.split('-')[0]
# verify mic-e format
if len(dstcall) != 6:
raise ParseError("dstcall has to be 6 characters")
if len(body) < 8:
raise ParseError("packet data field is too short")
if not re.match(r"^[0-9A-Z]{3}[0-9L-Z]{3}$", dstcall):
raise ParseError("invalid dstcall")
if not re.match(r"^[&-\x7f][&-a][\x1c-\x7f]{2}[\x1c-\x7d]"
r"[\x1c-\x7f][\x21-\x7e][\/\\0-9A-Z]", body):
raise ParseError("invalid data format")
# get symbol table and symbol
parsed.update({
'symbol': body[6],
'symbol_table': body[7]
})
# parse latitude
    # the routine translates each character into a lat digit as described in
# 'Mic-E Destination Address Field Encoding' table
tmpdstcall = ""
for i in dstcall:
if i in "KLZ": # spaces
tmpdstcall += " "
elif ord(i) > 76: # P-Y
tmpdstcall += chr(ord(i) - 32)
elif ord(i) > 57: # A-J
tmpdstcall += chr(ord(i) - 17)
else: # 0-9
tmpdstcall += i
# determine position ambiguity
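    # ambiguity is the number of trailing spaces left in the decoded latitude digits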
match = re.findall(r"^\d+( *)$", tmpdstcall)
if not match:
raise ParseError("invalid latitude ambiguity")
posambiguity = len(match[0])
parsed.update({
'posambiguity': posambiguity
})
    # adjust the coordinates to be in the center of the ambiguity box
tmpdstcall = list(tmpdstcall)
if posambiguity > 0:
if posambiguity >= 4:
tmpdstcall[2] = '3'
else:
tmpdstcall[6 - posambiguity] = '5'
tmpdstcall = "".join(tmpdstcall)
latminutes = float(("%s.%s" % (tmpdstcall[2:4], tmpdstcall[4:6])).replace(" ", "0"))
latitude = int(tmpdstcall[0:2]) + (latminutes / 60.0)
# determine the sign N/S
latitude = -latitude if ord(dstcall[3]) <= 0x4c else latitude
parsed.update({
'latitude': latitude
})
# parse message bits
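    # Each of the first three destination characters carries one message bit:
    # digits/L decode to 0, P-Z to a standard 1, A-K to a custom 1 (written as
    # '2' here so the custom table can be selected below).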
mbits = re.sub(r"[0-9L]", "0", dstcall[0:3])
mbits = re.sub(r"[P-Z]", "1", mbits)
mbits = re.sub(r"[A-K]", "2", mbits)
parsed.update({
'mbits': mbits
})
# resolve message type
if mbits.find("2") > -1:
parsed.update({
'mtype': MTYPE_TABLE_CUSTOM[mbits.replace("2", "1")]
})
else:
parsed.update({
'mtype': MTYPE_TABLE_STD[mbits]
})
# parse longitude
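    # The degrees byte is ASCII value minus 28, with a +100 offset flagged by the
    # fifth destination character; decoded values 180-189 stand for 100-109 deg
    # and 190-199 for 0-9 deg, which the two adjustments below undo.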
longitude = ord(body[0]) - 28 # decimal part of longitude
longitude += 100 if ord(dstcall[4]) >= 0x50 else 0 # apply lng offset
longitude += -80 if longitude >= 180 and longitude <= 189 else 0
longitude += -190 if longitude >= 190 and longitude <= 199 else 0
# long minutes
lngminutes = ord(body[1]) - 28.0
lngminutes += -60 if lngminutes >= 60 else 0
# + (long hundredths of minutes)
lngminutes += ((ord(body[2]) - 28.0) / 100.0)
# apply position ambiguity
    # the routine adjusts longitude to the center of the ambiguity box
if posambiguity == 4:
lngminutes = 30
elif posambiguity == 3:
lngminutes = (math.floor(lngminutes/10) + 0.5) * 10
elif posambiguity == 2:
lngminutes = math.floor(lngminutes) + 0.5
elif posambiguity == 1:
lngminutes = (math.floor(lngminutes*10) + 0.5) / 10.0
elif posambiguity != 0:
raise ParseError("Unsupported position ambiguity: %d" % posambiguity)
longitude += lngminutes / 60.0
# apply E/W sign
longitude = 0 - longitude if ord(dstcall[5]) >= 0x50 else longitude
parsed.update({
'longitude': longitude
})
# parse speed and course
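    # Speed and course are packed into info bytes 3-5 (SP+28, DC+28, SE+28):
    # SP holds the speed in tens of knots, DC packs the last speed digit with the
    # course hundreds digit, SE holds the remaining course digits. Offsets of 800
    # (speed) and 400 (course) are stripped before converting knots to km/h.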
speed = (ord(body[3]) - 28) * 10
course = ord(body[4]) - 28
quotient = int(course / 10.0)
course += -(quotient * 10)
course = course*100 + ord(body[5]) - 28
speed += quotient
speed += -800 if speed >= 800 else 0
course += -400 if course >= 400 else 0
speed *= 1.852 # knots * 1.852 = kmph
parsed.update({
'speed': speed,
'course': course
})
# the rest of the packet can contain telemetry and comment
if len(body) > 8:
body = body[8:]
# check for optional 2 or 5 channel telemetry
match = re.findall(r"^('[0-9a-f]{10}|`[0-9a-f]{4})(.*)$", body)
if match:
hexdata, body = match[0]
            hexdata = hexdata[1:]  # remove telemetry flag
channels = int(len(hexdata) / 2) # determine number of channels
hexdata = int(hexdata, 16) # convert hex to int
telemetry = []
for i in range(channels):
telemetry.insert(0, int(hexdata >> 8*i & 255))
parsed.update({'telemetry': telemetry})
# check for optional altitude
match = re.findall(r"^(.*)([!-{]{3})\}(.*)$", body)
if match:
body, altitude, extra = match[0]
altitude = base91.to_decimal(altitude) - 10000
parsed.update({'altitude': altitude})
body = body + extra
# attempt to parse comment telemetry
body, telemetry = parse_comment_telemetry(body)
parsed.update(telemetry)
    # parse DAO extension
body = parse_dao(body, parsed)
# rest is a comment
parsed.update({'comment': body.strip(' ')})
return ('', parsed)
| gpl-2.0 | 1,449,387,845,157,458,200 | 27.85 | 88 | 0.55349 | false |
zhaogaolong/oneFinger | storage/models.py | 1 | 1362 | from django.db import models
from asset.models import Host
# Create your models here.
# ####ceph##########
status_level = (
("up", "up"),
("warning", "warning"),
("down", "down"),
)
mode_status_level = (
("up", "up"),
("warning", "warning"),
("critical", "critical"),
("down", "down"),
)
class CephStatus(models.Model):
    # this is a ceph service table
status = models.CharField(choices=mode_status_level, max_length=64)
monitor_status = models.CharField(choices=status_level, max_length=64)
osd_status = models.CharField(choices=status_level, max_length=64)
def __unicode__(self):
return self.status
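# Editorial note: the sketch below is illustrative and not part of the original
# app; it only shows how a monitoring task might record cluster state with the
# plain Django ORM model defined above. It is never called in this module.
def _example_record_cluster_up():
    # Assumes the choice values declared in mode_status_level / status_level.
    return CephStatus.objects.create(status='up', monitor_status='up', osd_status='up')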
class CephMonitorStatus(models.Model):
    # this is a ceph monitor service table
host = models.ForeignKey(Host)
mon_id = models.IntegerField()
status = models.CharField(choices=status_level,
max_length=64,
blank=True,
null=True)
def __unicode__(self):
return self.host.hostname
class CephOsdStatus(models.Model):
    # this is a ceph osd service table
osd_name = models.CharField(max_length=64)
host = models.ForeignKey(Host)
status = models.CharField(choices=status_level, max_length=64)
def __unicode__(self):
return self.osd_name
| apache-2.0 | -2,960,812,635,545,907,000 | 25.705882 | 74 | 0.601322 | false |
ImaginationForPeople/alembic | tests/test_script_production.py | 1 | 16132 | from alembic.testing.fixtures import TestBase
from alembic.testing import eq_, ne_, assert_raises_message
from alembic.testing.env import clear_staging_env, staging_env, \
_get_staging_directory, _no_sql_testing_config, env_file_fixture, \
script_file_fixture, _testing_config, _sqlite_testing_config, \
three_rev_fixture, _multi_dir_testing_config, write_script,\
_sqlite_file_db
from alembic import command
from alembic.script import ScriptDirectory
from alembic.environment import EnvironmentContext
from alembic.testing import mock
from alembic import util
from alembic.operations import ops
import os
import datetime
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector
env, abc, def_ = None, None, None
class GeneralOrderedTests(TestBase):
def setUp(self):
global env
env = staging_env()
def tearDown(self):
clear_staging_env()
def test_steps(self):
self._test_001_environment()
self._test_002_rev_ids()
self._test_003_api_methods_clean()
self._test_004_rev()
self._test_005_nextrev()
self._test_006_from_clean_env()
self._test_007_long_name()
self._test_008_long_name_configurable()
def _test_001_environment(self):
assert_set = set(['env.py', 'script.py.mako', 'README'])
eq_(
assert_set.intersection(os.listdir(env.dir)),
assert_set
)
def _test_002_rev_ids(self):
global abc, def_
abc = util.rev_id()
def_ = util.rev_id()
ne_(abc, def_)
def _test_003_api_methods_clean(self):
eq_(env.get_heads(), [])
eq_(env.get_base(), None)
def _test_004_rev(self):
script = env.generate_revision(abc, "this is a message", refresh=True)
eq_(script.doc, "this is a message")
eq_(script.revision, abc)
eq_(script.down_revision, None)
assert os.access(
os.path.join(env.dir, 'versions',
'%s_this_is_a_message.py' % abc), os.F_OK)
assert callable(script.module.upgrade)
eq_(env.get_heads(), [abc])
eq_(env.get_base(), abc)
def _test_005_nextrev(self):
script = env.generate_revision(
def_, "this is the next rev", refresh=True)
assert os.access(
os.path.join(
env.dir, 'versions',
'%s_this_is_the_next_rev.py' % def_), os.F_OK)
eq_(script.revision, def_)
eq_(script.down_revision, abc)
eq_(env.get_revision(abc).nextrev, set([def_]))
assert script.module.down_revision == abc
assert callable(script.module.upgrade)
assert callable(script.module.downgrade)
eq_(env.get_heads(), [def_])
eq_(env.get_base(), abc)
def _test_006_from_clean_env(self):
# test the environment so far with a
# new ScriptDirectory instance.
env = staging_env(create=False)
abc_rev = env.get_revision(abc)
def_rev = env.get_revision(def_)
eq_(abc_rev.nextrev, set([def_]))
eq_(abc_rev.revision, abc)
eq_(def_rev.down_revision, abc)
eq_(env.get_heads(), [def_])
eq_(env.get_base(), abc)
def _test_007_long_name(self):
rid = util.rev_id()
env.generate_revision(rid,
"this is a really long name with "
"lots of characters and also "
"I'd like it to\nhave\nnewlines")
assert os.access(
os.path.join(
env.dir, 'versions',
'%s_this_is_a_really_long_name_with_lots_of_.py' % rid),
os.F_OK)
def _test_008_long_name_configurable(self):
env.truncate_slug_length = 60
rid = util.rev_id()
env.generate_revision(rid,
"this is a really long name with "
"lots of characters and also "
"I'd like it to\nhave\nnewlines")
assert os.access(
os.path.join(env.dir, 'versions',
'%s_this_is_a_really_long_name_with_lots_'
'of_characters_and_also_.py' % rid),
os.F_OK)
class ScriptNamingTest(TestBase):
@classmethod
def setup_class(cls):
_testing_config()
@classmethod
def teardown_class(cls):
clear_staging_env()
def test_args(self):
script = ScriptDirectory(
_get_staging_directory(),
file_template="%(rev)s_%(slug)s_"
"%(year)s_%(month)s_"
"%(day)s_%(hour)s_"
"%(minute)s_%(second)s"
)
create_date = datetime.datetime(2012, 7, 25, 15, 8, 5)
eq_(
script._rev_path(
script.versions, "12345", "this is a message", create_date),
os.path.abspath(
"%s/versions/12345_this_is_a_"
"message_2012_7_25_15_8_5.py" % _get_staging_directory())
)
class RevisionCommandTest(TestBase):
def setUp(self):
self.env = staging_env()
self.cfg = _sqlite_testing_config()
self.a, self.b, self.c = three_rev_fixture(self.cfg)
def tearDown(self):
clear_staging_env()
def test_create_script_basic(self):
rev = command.revision(self.cfg, message="some message")
script = ScriptDirectory.from_config(self.cfg)
rev = script.get_revision(rev.revision)
eq_(rev.down_revision, self.c)
assert "some message" in rev.doc
def test_create_script_splice(self):
rev = command.revision(
self.cfg, message="some message", head=self.b, splice=True)
script = ScriptDirectory.from_config(self.cfg)
rev = script.get_revision(rev.revision)
eq_(rev.down_revision, self.b)
assert "some message" in rev.doc
eq_(set(script.get_heads()), set([rev.revision, self.c]))
def test_create_script_missing_splice(self):
assert_raises_message(
util.CommandError,
"Revision %s is not a head revision; please specify --splice "
"to create a new branch from this revision" % self.b,
command.revision,
self.cfg, message="some message", head=self.b
)
def test_create_script_branches(self):
rev = command.revision(
self.cfg, message="some message", branch_label="foobar")
script = ScriptDirectory.from_config(self.cfg)
rev = script.get_revision(rev.revision)
eq_(script.get_revision("foobar"), rev)
def test_create_script_branches_old_template(self):
script = ScriptDirectory.from_config(self.cfg)
with open(os.path.join(script.dir, "script.py.mako"), "w") as file_:
file_.write(
"<%text>#</%text> ${message}\n"
"revision = ${repr(up_revision)}\n"
"down_revision = ${repr(down_revision)}\n"
"def upgrade():\n"
" ${upgrades if upgrades else 'pass'}\n\n"
"def downgrade():\n"
" ${downgrade if downgrades else 'pass'}\n\n"
)
# works OK if no branch names
command.revision(self.cfg, message="some message")
assert_raises_message(
util.CommandError,
r"Version \w+ specified branch_labels foobar, "
r"however the migration file .+?\b does not have them; have you "
"upgraded your script.py.mako to include the 'branch_labels' "
r"section\?",
command.revision,
self.cfg, message="some message", branch_label="foobar"
)
class CustomizeRevisionTest(TestBase):
def setUp(self):
self.env = staging_env()
self.cfg = _multi_dir_testing_config()
self.cfg.set_main_option("revision_environment", "true")
script = ScriptDirectory.from_config(self.cfg)
# MARKMARK
self.model1 = util.rev_id()
self.model2 = util.rev_id()
self.model3 = util.rev_id()
for model, name in [
(self.model1, "model1"),
(self.model2, "model2"),
(self.model3, "model3"),
]:
script.generate_revision(
model, name, refresh=True,
version_path=os.path.join(_get_staging_directory(), name),
head="base")
write_script(script, model, """\
"%s"
revision = '%s'
down_revision = None
branch_labels = ['%s']
from alembic import op
def upgrade():
pass
def downgrade():
pass
""" % (name, model, name))
def tearDown(self):
clear_staging_env()
def _env_fixture(self, fn, target_metadata):
self.engine = engine = _sqlite_file_db()
def run_env(self):
from alembic import context
with engine.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
process_revision_directives=fn)
with context.begin_transaction():
context.run_migrations()
return mock.patch(
"alembic.script.base.ScriptDirectory.run_env",
run_env
)
def test_new_locations_no_autogen(self):
m = sa.MetaData()
def process_revision_directives(context, rev, generate_revisions):
generate_revisions[:] = [
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(),
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model1"),
head="model1@head"
),
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(),
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model2"),
head="model2@head"
),
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(),
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model3"),
head="model3@head"
),
]
with self._env_fixture(process_revision_directives, m):
revs = command.revision(self.cfg, message="some message")
script = ScriptDirectory.from_config(self.cfg)
for rev, model in [
(revs[0], "model1"),
(revs[1], "model2"),
(revs[2], "model3"),
]:
rev_script = script.get_revision(rev.revision)
eq_(
rev_script.path,
os.path.abspath(os.path.join(
_get_staging_directory(), model,
"%s_.py" % (rev_script.revision, )
))
)
assert os.path.exists(rev_script.path)
def test_autogen(self):
m = sa.MetaData()
sa.Table('t', m, sa.Column('x', sa.Integer))
def process_revision_directives(context, rev, generate_revisions):
existing_upgrades = generate_revisions[0].upgrade_ops
existing_downgrades = generate_revisions[0].downgrade_ops
# model1 will run the upgrades, e.g. create the table,
# model2 will run the downgrades as upgrades, e.g. drop
# the table again
generate_revisions[:] = [
ops.MigrationScript(
util.rev_id(),
existing_upgrades,
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model1"),
head="model1@head"
),
ops.MigrationScript(
util.rev_id(),
existing_downgrades,
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model2"),
head="model2@head"
)
]
with self._env_fixture(process_revision_directives, m):
command.upgrade(self.cfg, "heads")
eq_(
Inspector.from_engine(self.engine).get_table_names(),
["alembic_version"]
)
command.revision(
self.cfg, message="some message",
autogenerate=True)
command.upgrade(self.cfg, "model1@head")
eq_(
Inspector.from_engine(self.engine).get_table_names(),
["alembic_version", "t"]
)
command.upgrade(self.cfg, "model2@head")
eq_(
Inspector.from_engine(self.engine).get_table_names(),
["alembic_version"]
)
class MultiDirRevisionCommandTest(TestBase):
def setUp(self):
self.env = staging_env()
self.cfg = _multi_dir_testing_config()
def tearDown(self):
clear_staging_env()
def test_multiple_dir_no_bases(self):
assert_raises_message(
util.CommandError,
"Multiple version locations present, please specify "
"--version-path",
command.revision, self.cfg, message="some message"
)
def test_multiple_dir_no_bases_invalid_version_path(self):
assert_raises_message(
util.CommandError,
"Path foo/bar/ is not represented in current version locations",
command.revision,
self.cfg, message="x",
version_path=os.path.join("foo/bar/")
)
def test_multiple_dir_no_bases_version_path(self):
script = command.revision(
self.cfg, message="x",
version_path=os.path.join(_get_staging_directory(), "model1"))
assert os.access(script.path, os.F_OK)
def test_multiple_dir_chooses_base(self):
command.revision(
self.cfg, message="x",
head="base",
version_path=os.path.join(_get_staging_directory(), "model1"))
script2 = command.revision(
self.cfg, message="y",
head="base",
version_path=os.path.join(_get_staging_directory(), "model2"))
script3 = command.revision(
self.cfg, message="y2",
head=script2.revision)
eq_(
os.path.dirname(script3.path),
os.path.abspath(os.path.join(_get_staging_directory(), "model2"))
)
assert os.access(script3.path, os.F_OK)
class TemplateArgsTest(TestBase):
def setUp(self):
staging_env()
self.cfg = _no_sql_testing_config(
directives="\nrevision_environment=true\n"
)
def tearDown(self):
clear_staging_env()
def test_args_propagate(self):
config = _no_sql_testing_config()
script = ScriptDirectory.from_config(config)
template_args = {"x": "x1", "y": "y1", "z": "z1"}
env = EnvironmentContext(
config,
script,
template_args=template_args
)
env.configure(dialect_name="sqlite",
template_args={"y": "y2", "q": "q1"})
eq_(
template_args,
{"x": "x1", "y": "y2", "z": "z1", "q": "q1"}
)
def test_tmpl_args_revision(self):
env_file_fixture("""
context.configure(dialect_name='sqlite', template_args={"somearg":"somevalue"})
""")
script_file_fixture("""
# somearg: ${somearg}
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
""")
command.revision(self.cfg, message="some rev")
script = ScriptDirectory.from_config(self.cfg)
rev = script.get_revision('head')
with open(rev.path) as f:
text = f.read()
assert "somearg: somevalue" in text
| mit | -4,762,082,893,149,908,000 | 32.193416 | 79 | 0.536511 | false |
pattyjogal/wy_ctf_website | config/urls.py | 1 | 1936 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include('wy_ctf_website.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^training/', include('wy_ctf_website.training.urls', namespace='training')),
url(r'^tools/', include('wy_ctf_website.tools.urls', namespace='tools')),
url(r'^willchan/', include('wy_ctf_website.willchan.urls', namespace='willchan')),
url(r'^docmo/', TemplateView.as_view(template_name='training/docmo.html'), name='docmo'),
# OK SO I'M BEING A BAD BOY AND RUNNING MY LATEX BOT OFF OF THE SERVER SUE ME
url(r'^latex/', include('wy_ctf_website.latex.urls', namespace='latex'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
| mit | -8,947,484,937,655,502,000 | 45.095238 | 110 | 0.685434 | false |
mjs7231/python-plexapi | plexapi/video.py | 1 | 36601 | # -*- coding: utf-8 -*-
import os
from urllib.parse import quote_plus, urlencode
from plexapi import media, utils, settings, library
from plexapi.base import Playable, PlexPartialObject
from plexapi.exceptions import BadRequest, NotFound
class Video(PlexPartialObject):
""" Base class for all video objects including :class:`~plexapi.video.Movie`,
:class:`~plexapi.video.Show`, :class:`~plexapi.video.Season`,
:class:`~plexapi.video.Episode`.
Attributes:
addedAt (datetime): Datetime this item was added to the library.
key (str): API URL (/library/metadata/<ratingkey>).
lastViewedAt (datetime): Datetime item was last accessed.
librarySectionID (int): :class:`~plexapi.library.LibrarySection` ID.
listType (str): Hardcoded as 'audio' (useful for search filters).
ratingKey (int): Unique key identifying this item.
summary (str): Summary of the artist, track, or album.
thumb (str): URL to thumbnail image.
title (str): Artist, Album or Track title. (Jason Mraz, We Sing, Lucky, etc.)
titleSort (str): Title to use when sorting (defaults to title).
type (str): 'artist', 'album', or 'track'.
            updatedAt (datetime): Datetime this item was updated.
viewCount (int): Count of times this item was accessed.
"""
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.listType = 'video'
self.addedAt = utils.toDatetime(data.attrib.get('addedAt'))
self.key = data.attrib.get('key', '')
self.lastViewedAt = utils.toDatetime(data.attrib.get('lastViewedAt'))
self.librarySectionID = data.attrib.get('librarySectionID')
self.ratingKey = utils.cast(int, data.attrib.get('ratingKey'))
self.summary = data.attrib.get('summary')
self.thumb = data.attrib.get('thumb')
self.title = data.attrib.get('title')
self.titleSort = data.attrib.get('titleSort', self.title)
self.type = data.attrib.get('type')
self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt'))
self.viewCount = utils.cast(int, data.attrib.get('viewCount', 0))
@property
def isWatched(self):
""" Returns True if this video is watched. """
return bool(self.viewCount > 0) if self.viewCount else False
@property
def thumbUrl(self):
""" Return the first first thumbnail url starting on
the most specific thumbnail for that item.
"""
        thumb = self.firstAttr('thumb', 'parentThumb', 'grandparentThumb')
return self._server.url(thumb, includeToken=True) if thumb else None
@property
def artUrl(self):
""" Return the first first art url starting on the most specific for that item."""
art = self.firstAttr('art', 'grandparentArt')
return self._server.url(art, includeToken=True) if art else None
def url(self, part):
""" Returns the full url for something. Typically used for getting a specific image. """
return self._server.url(part, includeToken=True) if part else None
def markWatched(self):
""" Mark video as watched. """
key = '/:/scrobble?key=%s&identifier=com.plexapp.plugins.library' % self.ratingKey
self._server.query(key)
self.reload()
def markUnwatched(self):
""" Mark video unwatched. """
key = '/:/unscrobble?key=%s&identifier=com.plexapp.plugins.library' % self.ratingKey
self._server.query(key)
self.reload()
def rate(self, rate):
""" Rate video. """
key = '/:/rate?key=%s&identifier=com.plexapp.plugins.library&rating=%s' % (self.ratingKey, rate)
self._server.query(key)
self.reload()
def _defaultSyncTitle(self):
""" Returns str, default title for a new syncItem. """
return self.title
def subtitleStreams(self):
""" Returns a list of :class:`~plexapi.media.SubtitleStream` objects for all MediaParts. """
streams = []
parts = self.iterParts()
for part in parts:
streams += part.subtitleStreams()
return streams
def uploadSubtitles(self, filepath):
""" Upload Subtitle file for video. """
url = '%s/subtitles' % self.key
filename = os.path.basename(filepath)
subFormat = os.path.splitext(filepath)[1][1:]
with open(filepath, 'rb') as subfile:
params = {'title': filename,
'format': subFormat
}
headers = {'Accept': 'text/plain, */*'}
self._server.query(url, self._server._session.post, data=subfile, params=params, headers=headers)
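    # Editorial note (illustrative call, not from the original source):
    #   video.uploadSubtitles('/path/to/Movie.en.srt')
    # posts the file to the item's /subtitles endpoint as shown above.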
def removeSubtitles(self, streamID=None, streamTitle=None):
""" Remove Subtitle from movie's subtitles listing.
            Note: If the subtitle file is located inside the video directory it will be deleted.
            Files outside of the video directory are not affected.
"""
for stream in self.subtitleStreams():
if streamID == stream.id or streamTitle == stream.title:
self._server.query(stream.key, self._server._session.delete)
def optimize(self, title=None, target="", targetTagID=None, locationID=-1, policyScope='all',
policyValue="", policyUnwatched=0, videoQuality=None, deviceProfile=None):
""" Optimize item
            locationID (int): -1 in folder with original items
2 library path
target (str): custom quality name.
if none provided use "Custom: {deviceProfile}"
targetTagID (int): Default quality settings
1 Mobile
2 TV
3 Original Quality
deviceProfile (str): Android, IOS, Universal TV, Universal Mobile, Windows Phone,
Windows, Xbox One
Example:
Optimize for Mobile
item.optimize(targetTagID="Mobile") or item.optimize(targetTagID=1")
Optimize for Android 10 MBPS 1080p
item.optimize(deviceProfile="Android", videoQuality=10)
Optimize for IOS Original Quality
item.optimize(deviceProfile="IOS", videoQuality=-1)
* see sync.py VIDEO_QUALITIES for additional information for using videoQuality
"""
tagValues = [1, 2, 3]
tagKeys = ["Mobile", "TV", "Original Quality"]
tagIDs = tagKeys + tagValues
if targetTagID not in tagIDs and (deviceProfile is None or videoQuality is None):
raise BadRequest('Unexpected or missing quality profile.')
if isinstance(targetTagID, str):
tagIndex = tagKeys.index(targetTagID)
targetTagID = tagValues[tagIndex]
if title is None:
title = self.title
backgroundProcessing = self.fetchItem('/playlists?type=42')
key = '%s/items?' % backgroundProcessing.key
params = {
'Item[type]': 42,
'Item[target]': target,
'Item[targetTagID]': targetTagID if targetTagID else '',
'Item[locationID]': locationID,
'Item[Policy][scope]': policyScope,
'Item[Policy][value]': policyValue,
'Item[Policy][unwatched]': policyUnwatched
}
if deviceProfile:
params['Item[Device][profile]'] = deviceProfile
if videoQuality:
from plexapi.sync import MediaSettings
mediaSettings = MediaSettings.createVideo(videoQuality)
params['Item[MediaSettings][videoQuality]'] = mediaSettings.videoQuality
params['Item[MediaSettings][videoResolution]'] = mediaSettings.videoResolution
params['Item[MediaSettings][maxVideoBitrate]'] = mediaSettings.maxVideoBitrate
params['Item[MediaSettings][audioBoost]'] = ''
params['Item[MediaSettings][subtitleSize]'] = ''
params['Item[MediaSettings][musicBitrate]'] = ''
params['Item[MediaSettings][photoQuality]'] = ''
titleParam = {'Item[title]': title}
section = self._server.library.sectionByID(self.librarySectionID)
params['Item[Location][uri]'] = 'library://' + section.uuid + '/item/' + \
quote_plus(self.key + '?includeExternalMedia=1')
data = key + urlencode(params) + '&' + urlencode(titleParam)
return self._server.query(data, method=self._server._session.put)
def sync(self, videoQuality, client=None, clientId=None, limit=None, unwatched=False, title=None):
""" Add current video (movie, tv-show, season or episode) as sync item for specified device.
See :func:`plexapi.myplex.MyPlexAccount.sync()` for possible exceptions.
Parameters:
videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in
:mod:`plexapi.sync` module.
client (:class:`plexapi.myplex.MyPlexDevice`): sync destination, see
:func:`plexapi.myplex.MyPlexAccount.sync`.
clientId (str): sync destination, see :func:`plexapi.myplex.MyPlexAccount.sync`.
limit (int): maximum count of items to sync, unlimited if `None`.
unwatched (bool): if `True` watched videos wouldn't be synced.
title (str): descriptive title for the new :class:`plexapi.sync.SyncItem`, if empty the value would be
generated from metadata of current media.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
"""
from plexapi.sync import SyncItem, Policy, MediaSettings
myplex = self._server.myPlexAccount()
sync_item = SyncItem(self._server, None)
sync_item.title = title if title else self._defaultSyncTitle()
sync_item.rootTitle = self.title
sync_item.contentType = self.listType
sync_item.metadataType = self.METADATA_TYPE
sync_item.machineIdentifier = self._server.machineIdentifier
section = self._server.library.sectionByID(self.librarySectionID)
sync_item.location = 'library://%s/item/%s' % (section.uuid, quote_plus(self.key))
sync_item.policy = Policy.create(limit, unwatched)
sync_item.mediaSettings = MediaSettings.createVideo(videoQuality)
return myplex.sync(sync_item, client=client, clientId=clientId)
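    # Editorial note (illustrative usage; the device lookup is an assumption,
    # not defined in this file):
    #   from plexapi.sync import VIDEO_QUALITY_ORIGINAL
    #   movie.sync(VIDEO_QUALITY_ORIGINAL, client=account.device('My iPad'))
    # which builds and submits a SyncItem just as the method above does.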
@utils.registerPlexObject
class Movie(Playable, Video):
""" Represents a single Movie.
Attributes:
TAG (str): 'Video'
TYPE (str): 'movie'
art (str): Key to movie artwork (/library/metadata/<ratingkey>/art/<artid>)
audienceRating (float): Audience rating (usually from Rotten Tomatoes).
audienceRatingImage (str): Key to audience rating image (rottentomatoes://image.rating.spilled)
chapterSource (str): Chapter source (agent; media; mixed).
contentRating (str) Content rating (PG-13; NR; TV-G).
duration (int): Duration of movie in milliseconds.
guid: Plex GUID (com.plexapp.agents.imdb://tt4302938?lang=en).
originalTitle (str): Original title, often the foreign title (転々; 엽기적인 그녀).
originallyAvailableAt (datetime): Datetime movie was released.
primaryExtraKey (str) Primary extra key (/library/metadata/66351).
rating (float): Movie rating (7.9; 9.8; 8.1).
ratingImage (str): Key to rating image (rottentomatoes://image.rating.rotten).
studio (str): Studio that created movie (Di Bonaventura Pictures; 21 Laps Entertainment).
tagline (str): Movie tag line (Back 2 Work; Who says men can't change?).
userRating (float): User rating (2.0; 8.0).
viewOffset (int): View offset in milliseconds.
year (int): Year movie was released.
collections (List<:class:`~plexapi.media.Collection`>): List of collections this media belongs.
countries (List<:class:`~plexapi.media.Country`>): List of countries objects.
directors (List<:class:`~plexapi.media.Director`>): List of director objects.
fields (List<:class:`~plexapi.media.Field`>): List of field objects.
genres (List<:class:`~plexapi.media.Genre`>): List of genre objects.
media (List<:class:`~plexapi.media.Media`>): List of media objects.
producers (List<:class:`~plexapi.media.Producer`>): List of producers objects.
roles (List<:class:`~plexapi.media.Role`>): List of role objects.
writers (List<:class:`~plexapi.media.Writer`>): List of writers objects.
chapters (List<:class:`~plexapi.media.Chapter`>): List of Chapter objects.
similar (List<:class:`~plexapi.media.Similar`>): List of Similar objects.
"""
TAG = 'Video'
TYPE = 'movie'
METADATA_TYPE = 'movie'
_include = ('?checkFiles=1&includeExtras=1&includeRelated=1'
'&includeOnDeck=1&includeChapters=1&includePopularLeaves=1'
'&includeConcerts=1&includePreferences=1')
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
Video._loadData(self, data)
Playable._loadData(self, data)
self._details_key = self.key + self._include
self.art = data.attrib.get('art')
self.audienceRating = utils.cast(float, data.attrib.get('audienceRating'))
self.audienceRatingImage = data.attrib.get('audienceRatingImage')
self.chapterSource = data.attrib.get('chapterSource')
self.contentRating = data.attrib.get('contentRating')
self.duration = utils.cast(int, data.attrib.get('duration'))
self.guid = data.attrib.get('guid')
self.originalTitle = data.attrib.get('originalTitle')
self.originallyAvailableAt = utils.toDatetime(
data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')
self.primaryExtraKey = data.attrib.get('primaryExtraKey')
self.rating = utils.cast(float, data.attrib.get('rating'))
self.ratingImage = data.attrib.get('ratingImage')
self.studio = data.attrib.get('studio')
self.tagline = data.attrib.get('tagline')
self.userRating = utils.cast(float, data.attrib.get('userRating'))
self.viewOffset = utils.cast(int, data.attrib.get('viewOffset', 0))
self.year = utils.cast(int, data.attrib.get('year'))
self.collections = self.findItems(data, media.Collection)
self.countries = self.findItems(data, media.Country)
self.directors = self.findItems(data, media.Director)
self.fields = self.findItems(data, media.Field)
self.genres = self.findItems(data, media.Genre)
self.media = self.findItems(data, media.Media)
self.producers = self.findItems(data, media.Producer)
self.roles = self.findItems(data, media.Role)
self.writers = self.findItems(data, media.Writer)
self.labels = self.findItems(data, media.Label)
self.chapters = self.findItems(data, media.Chapter)
self.similar = self.findItems(data, media.Similar)
@property
def actors(self):
""" Alias to self.roles. """
return self.roles
@property
def locations(self):
""" This does not exist in plex xml response but is added to have a common
interface to get the location of the Movie/Show/Episode
"""
return [part.file for part in self.iterParts() if part]
def _prettyfilename(self):
# This is just for compat.
return self.title
def download(self, savepath=None, keep_original_name=False, **kwargs):
""" Download video files to specified directory.
Parameters:
savepath (str): Defaults to current working dir.
keep_original_name (bool): True to keep the original file name otherwise
                    a friendlier one is generated.
**kwargs: Additional options passed into :func:`~plexapi.base.PlexObject.getStreamURL()`.
"""
filepaths = []
locations = [i for i in self.iterParts() if i]
for location in locations:
name = location.file
if not keep_original_name:
title = self.title.replace(' ', '.')
name = '%s.%s' % (title, location.container)
            if kwargs:
                url = self.getStreamURL(**kwargs)
            else:
                url = self._server.url('%s?download=1' % location.key)
filepath = utils.download(url, self._server._token, filename=name,
savepath=savepath, session=self._server._session)
if filepath:
filepaths.append(filepath)
return filepaths
@utils.registerPlexObject
class Show(Video):
""" Represents a single Show (including all seasons and episodes).
Attributes:
TAG (str): 'Directory'
TYPE (str): 'show'
art (str): Key to show artwork (/library/metadata/<ratingkey>/art/<artid>)
banner (str): Key to banner artwork (/library/metadata/<ratingkey>/art/<artid>)
childCount (int): Unknown.
contentRating (str) Content rating (PG-13; NR; TV-G).
collections (List<:class:`~plexapi.media.Collection`>): List of collections this media belongs.
duration (int): Duration of show in milliseconds.
guid (str): Plex GUID (com.plexapp.agents.imdb://tt4302938?lang=en).
index (int): Plex index (?)
leafCount (int): Unknown.
locations (list<str>): List of locations paths.
originallyAvailableAt (datetime): Datetime show was released.
rating (float): Show rating (7.9; 9.8; 8.1).
studio (str): Studio that created show (Di Bonaventura Pictures; 21 Laps Entertainment).
theme (str): Key to theme resource (/library/metadata/<ratingkey>/theme/<themeid>)
viewedLeafCount (int): Unknown.
year (int): Year the show was released.
genres (List<:class:`~plexapi.media.Genre`>): List of genre objects.
roles (List<:class:`~plexapi.media.Role`>): List of role objects.
similar (List<:class:`~plexapi.media.Similar`>): List of Similar objects.
"""
TAG = 'Directory'
TYPE = 'show'
METADATA_TYPE = 'episode'
_include = ('?checkFiles=1&includeExtras=1&includeRelated=1'
'&includeOnDeck=1&includeChapters=1&includePopularLeaves=1'
'&includeMarkers=1&includeConcerts=1&includePreferences=1')
def __iter__(self):
for season in self.seasons():
yield season
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
Video._loadData(self, data)
# fix key if loaded from search
self.key = self.key.replace('/children', '')
self._details_key = self.key + self._include
self.art = data.attrib.get('art')
self.banner = data.attrib.get('banner')
self.childCount = utils.cast(int, data.attrib.get('childCount'))
self.contentRating = data.attrib.get('contentRating')
self.collections = self.findItems(data, media.Collection)
self.duration = utils.cast(int, data.attrib.get('duration'))
self.guid = data.attrib.get('guid')
self.index = data.attrib.get('index')
self.leafCount = utils.cast(int, data.attrib.get('leafCount'))
self.locations = self.listAttrs(data, 'path', etag='Location')
self.originallyAvailableAt = utils.toDatetime(
data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')
self.rating = utils.cast(float, data.attrib.get('rating'))
self.studio = data.attrib.get('studio')
self.theme = data.attrib.get('theme')
self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))
self.year = utils.cast(int, data.attrib.get('year'))
self.genres = self.findItems(data, media.Genre)
self.roles = self.findItems(data, media.Role)
self.labels = self.findItems(data, media.Label)
self.similar = self.findItems(data, media.Similar)
@property
def actors(self):
""" Alias to self.roles. """
return self.roles
@property
def isWatched(self):
""" Returns True if this show is fully watched. """
return bool(self.viewedLeafCount == self.leafCount)
def preferences(self):
""" Returns a list of :class:`~plexapi.settings.Preferences` objects. """
items = []
data = self._server.query(self._details_key)
for item in data.iter('Preferences'):
for elem in item:
items.append(settings.Preferences(data=elem, server=self._server))
return items
def hubs(self):
""" Returns a list of :class:`~plexapi.library.Hub` objects. """
data = self._server.query(self._details_key)
for item in data.iter('Related'):
return self.findItems(item, library.Hub)
def onDeck(self):
""" Returns shows On Deck :class:`~plexapi.video.Video` object.
If show is unwatched, return will likely be the first episode.
"""
data = self._server.query(self._details_key)
return self.findItems([item for item in data.iter('OnDeck')][0])[0]
def seasons(self, **kwargs):
""" Returns a list of :class:`~plexapi.video.Season` objects. """
key = '/library/metadata/%s/children?excludeAllLeaves=1' % self.ratingKey
return self.fetchItems(key, **kwargs)
def season(self, title=None):
""" Returns the season with the specified title or number.
Parameters:
title (str or int): Title or Number of the season to return.
"""
key = '/library/metadata/%s/children' % self.ratingKey
if isinstance(title, int):
return self.fetchItem(key, etag='Directory', index__iexact=str(title))
return self.fetchItem(key, etag='Directory', title__iexact=title)
def episodes(self, **kwargs):
""" Returns a list of :class:`~plexapi.video.Episode` objects. """
key = '/library/metadata/%s/allLeaves' % self.ratingKey
return self.fetchItems(key, **kwargs)
def episode(self, title=None, season=None, episode=None):
""" Find a episode using a title or season and episode.
Parameters:
title (str): Title of the episode to return
season (int): Season number (default:None; required if title not specified).
episode (int): Episode number (default:None; required if title not specified).
Raises:
:class:`plexapi.exceptions.BadRequest`: If season and episode is missing.
:class:`plexapi.exceptions.NotFound`: If the episode is missing.
"""
if title:
key = '/library/metadata/%s/allLeaves' % self.ratingKey
return self.fetchItem(key, title__iexact=title)
elif season is not None and episode:
results = [i for i in self.episodes() if i.seasonNumber == season and i.index == episode]
if results:
return results[0]
raise NotFound('Couldnt find %s S%s E%s' % (self.title, season, episode))
raise BadRequest('Missing argument: title or season and episode are required')
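    # Editorial note (illustrative calls): show.episode(title='Pilot') or
    # show.episode(season=1, episode=3) both resolve through the lookups above.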
def watched(self):
""" Returns list of watched :class:`~plexapi.video.Episode` objects. """
return self.episodes(viewCount__gt=0)
def unwatched(self):
""" Returns list of unwatched :class:`~plexapi.video.Episode` objects. """
return self.episodes(viewCount=0)
def get(self, title=None, season=None, episode=None):
""" Alias to :func:`~plexapi.video.Show.episode()`. """
return self.episode(title, season, episode)
def download(self, savepath=None, keep_original_name=False, **kwargs):
""" Download video files to specified directory.
Parameters:
savepath (str): Defaults to current working dir.
keep_original_name (bool): True to keep the original file name otherwise
                    a friendlier one is generated.
**kwargs: Additional options passed into :func:`~plexapi.base.PlexObject.getStreamURL()`.
"""
filepaths = []
for episode in self.episodes():
filepaths += episode.download(savepath, keep_original_name, **kwargs)
return filepaths
@utils.registerPlexObject
class Season(Video):
""" Represents a single Show Season (including all episodes).
Attributes:
TAG (str): 'Directory'
TYPE (str): 'season'
leafCount (int): Number of episodes in season.
index (int): Season number.
parentKey (str): Key to this seasons :class:`~plexapi.video.Show`.
parentRatingKey (int): Unique key for this seasons :class:`~plexapi.video.Show`.
parentTitle (str): Title of this seasons :class:`~plexapi.video.Show`.
viewedLeafCount (int): Number of watched episodes in season.
"""
TAG = 'Directory'
TYPE = 'season'
METADATA_TYPE = 'episode'
def __iter__(self):
for episode in self.episodes():
yield episode
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
Video._loadData(self, data)
# fix key if loaded from search
self.key = self.key.replace('/children', '')
self.leafCount = utils.cast(int, data.attrib.get('leafCount'))
self.index = utils.cast(int, data.attrib.get('index'))
self.parentKey = data.attrib.get('parentKey')
self.parentRatingKey = utils.cast(int, data.attrib.get('parentRatingKey'))
self.parentTitle = data.attrib.get('parentTitle')
self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))
def __repr__(self):
return '<%s>' % ':'.join([p for p in [
self.__class__.__name__,
self.key.replace('/library/metadata/', '').replace('/children', ''),
'%s-s%s' % (self.parentTitle.replace(' ', '-')[:20], self.seasonNumber),
] if p])
@property
def isWatched(self):
""" Returns True if this season is fully watched. """
return bool(self.viewedLeafCount == self.leafCount)
@property
def seasonNumber(self):
""" Returns season number. """
return self.index
def episodes(self, **kwargs):
""" Returns a list of :class:`~plexapi.video.Episode` objects. """
key = '/library/metadata/%s/children' % self.ratingKey
return self.fetchItems(key, **kwargs)
def episode(self, title=None, episode=None):
""" Returns the episode with the given title or number.
Parameters:
title (str): Title of the episode to return.
episode (int): Episode number (default:None; required if title not specified).
"""
if not title and not episode:
raise BadRequest('Missing argument, you need to use title or episode.')
key = '/library/metadata/%s/children' % self.ratingKey
if title:
return self.fetchItem(key, title=title)
return self.fetchItem(key, parentIndex=self.index, index=episode)
def get(self, title=None, episode=None):
""" Alias to :func:`~plexapi.video.Season.episode()`. """
return self.episode(title, episode)
def show(self):
""" Return this seasons :func:`~plexapi.video.Show`.. """
return self.fetchItem(int(self.parentRatingKey))
def watched(self):
""" Returns list of watched :class:`~plexapi.video.Episode` objects. """
return self.episodes(watched=True)
def unwatched(self):
""" Returns list of unwatched :class:`~plexapi.video.Episode` objects. """
return self.episodes(watched=False)
def download(self, savepath=None, keep_original_name=False, **kwargs):
""" Download video files to specified directory.
Parameters:
savepath (str): Defaults to current working dir.
keep_original_name (bool): True to keep the original file name otherwise
                    a friendlier one is generated.
**kwargs: Additional options passed into :func:`~plexapi.base.PlexObject.getStreamURL()`.
"""
filepaths = []
for episode in self.episodes():
filepaths += episode.download(savepath, keep_original_name, **kwargs)
return filepaths
def _defaultSyncTitle(self):
""" Returns str, default title for a new syncItem. """
return '%s - %s' % (self.parentTitle, self.title)
@utils.registerPlexObject
class Episode(Playable, Video):
""" Represents a single Shows Episode.
Attributes:
TAG (str): 'Video'
TYPE (str): 'episode'
art (str): Key to episode artwork (/library/metadata/<ratingkey>/art/<artid>)
chapterSource (str): Unknown (media).
contentRating (str) Content rating (PG-13; NR; TV-G).
duration (int): Duration of episode in milliseconds.
grandparentArt (str): Key to this episodes :class:`~plexapi.video.Show` artwork.
grandparentKey (str): Key to this episodes :class:`~plexapi.video.Show`.
grandparentRatingKey (str): Unique key for this episodes :class:`~plexapi.video.Show`.
grandparentTheme (str): Key to this episodes :class:`~plexapi.video.Show` theme.
grandparentThumb (str): Key to this episodes :class:`~plexapi.video.Show` thumb.
grandparentTitle (str): Title of this episodes :class:`~plexapi.video.Show`.
guid (str): Plex GUID (com.plexapp.agents.imdb://tt4302938?lang=en).
index (int): Episode number.
originallyAvailableAt (datetime): Datetime episode was released.
parentIndex (str): Season number of episode.
parentKey (str): Key to this episodes :class:`~plexapi.video.Season`.
parentRatingKey (int): Unique key for this episodes :class:`~plexapi.video.Season`.
parentThumb (str): Key to this episodes thumbnail.
parentTitle (str): Name of this episode's season
title (str): Name of this Episode
rating (float): Movie rating (7.9; 9.8; 8.1).
viewOffset (int): View offset in milliseconds.
year (int): Year episode was released.
directors (List<:class:`~plexapi.media.Director`>): List of director objects.
media (List<:class:`~plexapi.media.Media`>): List of media objects.
writers (List<:class:`~plexapi.media.Writer`>): List of writers objects.
"""
TAG = 'Video'
TYPE = 'episode'
METADATA_TYPE = 'episode'
_include = ('?checkFiles=1&includeExtras=1&includeRelated=1'
'&includeOnDeck=1&includeChapters=1&includePopularLeaves=1'
'&includeMarkers=1&includeConcerts=1&includePreferences=1')
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
Video._loadData(self, data)
Playable._loadData(self, data)
self._details_key = self.key + self._include
self._seasonNumber = None # cached season number
self.art = data.attrib.get('art')
self.chapterSource = data.attrib.get('chapterSource')
self.contentRating = data.attrib.get('contentRating')
self.duration = utils.cast(int, data.attrib.get('duration'))
self.grandparentArt = data.attrib.get('grandparentArt')
self.grandparentKey = data.attrib.get('grandparentKey')
self.grandparentRatingKey = utils.cast(int, data.attrib.get('grandparentRatingKey'))
self.grandparentTheme = data.attrib.get('grandparentTheme')
self.grandparentThumb = data.attrib.get('grandparentThumb')
self.grandparentTitle = data.attrib.get('grandparentTitle')
self.guid = data.attrib.get('guid')
self.index = utils.cast(int, data.attrib.get('index'))
self.originallyAvailableAt = utils.toDatetime(data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')
self.parentIndex = data.attrib.get('parentIndex')
self.parentKey = data.attrib.get('parentKey')
self.parentRatingKey = utils.cast(int, data.attrib.get('parentRatingKey'))
self.parentThumb = data.attrib.get('parentThumb')
self.parentTitle = data.attrib.get('parentTitle')
self.title = data.attrib.get('title')
self.rating = utils.cast(float, data.attrib.get('rating'))
self.viewOffset = utils.cast(int, data.attrib.get('viewOffset', 0))
self.year = utils.cast(int, data.attrib.get('year'))
self.directors = self.findItems(data, media.Director)
self.media = self.findItems(data, media.Media)
self.writers = self.findItems(data, media.Writer)
self.labels = self.findItems(data, media.Label)
self.collections = self.findItems(data, media.Collection)
self.chapters = self.findItems(data, media.Chapter)
self.markers = self.findItems(data, media.Marker)
def __repr__(self):
return '<%s>' % ':'.join([p for p in [
self.__class__.__name__,
self.key.replace('/library/metadata/', '').replace('/children', ''),
'%s-%s' % (self.grandparentTitle.replace(' ', '-')[:20], self.seasonEpisode),
] if p])
def _prettyfilename(self):
""" Returns a human friendly filename. """
return '%s.%s' % (self.grandparentTitle.replace(' ', '.'), self.seasonEpisode)
@property
def locations(self):
""" This does not exist in plex xml response but is added to have a common
interface to get the location of the Movie/Show
"""
return [part.file for part in self.iterParts() if part]
@property
def seasonNumber(self):
""" Returns this episodes season number. """
if self._seasonNumber is None:
self._seasonNumber = self.parentIndex if self.parentIndex else self.season().seasonNumber
return utils.cast(int, self._seasonNumber)
@property
def seasonEpisode(self):
""" Returns the s00e00 string containing the season and episode. """
return 's%se%s' % (str(self.seasonNumber).zfill(2), str(self.index).zfill(2))
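        # Editorial note: e.g. season 2, episode 5 yields 's02e05'.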
@property
def hasIntroMarker(self):
""" Returns True if this episode has an intro marker in the xml. """
if not self.isFullObject():
self.reload()
return any(marker.type == 'intro' for marker in self.markers)
def season(self):
"""" Return this episodes :func:`~plexapi.video.Season`.. """
return self.fetchItem(self.parentKey)
def show(self):
"""" Return this episodes :func:`~plexapi.video.Show`.. """
return self.fetchItem(int(self.grandparentRatingKey))
def _defaultSyncTitle(self):
""" Returns str, default title for a new syncItem. """
return '%s - %s - (%s) %s' % (self.grandparentTitle, self.parentTitle, self.seasonEpisode, self.title)
@utils.registerPlexObject
class Clip(Playable, Video):
""" Represents a single Clip."""
TAG = 'Video'
TYPE = 'clip'
METADATA_TYPE = 'clip'
def _loadData(self, data):
self._data = data
self.addedAt = data.attrib.get('addedAt')
self.duration = data.attrib.get('duration')
self.guid = data.attrib.get('guid')
self.key = data.attrib.get('key')
self.originallyAvailableAt = data.attrib.get('originallyAvailableAt')
self.ratingKey = data.attrib.get('ratingKey')
self.skipDetails = utils.cast(int, data.attrib.get('skipDetails'))
self.subtype = data.attrib.get('subtype')
self.thumb = data.attrib.get('thumb')
self.thumbAspectRatio = data.attrib.get('thumbAspectRatio')
self.title = data.attrib.get('title')
self.type = data.attrib.get('type')
self.year = data.attrib.get('year')
| bsd-3-clause | 2,879,791,555,386,610,700 | 45.545802 | 118 | 0.616701 | false |
MichSchli/QuestionAnsweringGCN | old_version/experiment_construction/experiment_runner_construction/experiment_runner.py | 1 | 2844 | from evaluation.python_evaluator import Evaluator
from helpers.read_conll_files import ConllReader
class ExperimentRunner:
learner = None
train_file_iterator = None
kb_prefix = None
max_elements_to_read = None
def __init__(self, disambiguation=None, score_transform=None):
self.kb_prefix = ""
self.disambiguation = disambiguation
self.score_transform = score_transform
def set_train_file(self, train_file_location):
self.train_file_iterator = ConllReader(train_file_location, self.kb_prefix,
max_elements=self.max_elements_to_read,
disambiguation=self.disambiguation,
score_transform=self.score_transform)
def set_validation_file(self, validation_file_location):
self.validation_file_iterator = ConllReader(validation_file_location, self.kb_prefix,
max_elements=self.max_elements_to_read,
disambiguation=self.disambiguation,
score_transform=self.score_transform)
def set_test_file(self, test_file_location):
self.test_file_iterator = ConllReader(test_file_location, self.kb_prefix,
max_elements=self.max_elements_to_read,
disambiguation=self.disambiguation,
score_transform=self.score_transform)
def set_train_evaluator(self, train_evaluator):
self.train_evaluator = train_evaluator
def set_test_evaluator(self, test_evaluator):
self.test_evaluator = test_evaluator
def set_valid_evaluator(self, valid_evaluator):
self.valid_evaluator = valid_evaluator
def set_kb_prefix(self, prefix):
self.kb_prefix = prefix
def limit_elements(self, limit):
self.max_elements_to_read = limit
def train_and_validate(self):
self.learner.initialize()
best_epochs, performance = self.learner.train_and_validate(self.train_file_iterator, self.validation_file_iterator)
return best_epochs, performance
def evaluate(self, file):
if file == "train_file":
iterator = self.train_file_iterator
evaluator = self.train_evaluator
elif file == "valid_file":
iterator = self.validation_file_iterator
evaluator = self.valid_evaluator
elif file == "test_file":
iterator = self.test_file_iterator
evaluator = self.test_evaluator
predictions = self.learner.predict(iterator)
evaluation = evaluator.evaluate(predictions)
return evaluation | mit | 2,841,240,547,594,646,000 | 40.838235 | 123 | 0.592124 | false |
drednout/deploy_test | hello/settings.py | 1 | 3337 | """
Django settings for gettingstarted project, on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i+acxn5(akgsn!sr4^qgf(^m&*@+g1@u^t@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'base'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'hello.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hello.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Parse database configuration from $DATABASE_URL
DATABASES['default'] = dj_database_url.config()
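# Editorial note (not from the original file): dj_database_url.config() reads the
# DATABASE_URL environment variable, e.g. postgres://user:password@host:5432/dbname,
# and expands it into the DATABASES['default'] dict configured above.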
# Enable Persistent Connections
DATABASES['default']['CONN_MAX_AGE'] = 500
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| bsd-2-clause | 4,166,863,583,795,054,000 | 26.578512 | 75 | 0.697932 | false |
ondrokrc/gramps | gramps/webapp/connection.py | 1 | 1944 | # Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2012 Douglas S. Blank <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from urllib.request import (Request, urlopen, HTTPCookieProcessor,
build_opener, install_opener)
from urllib.parse import urlencode
class Connection(object):
"""
>>> conn = Connection()
>>> response = conn.login("http://blankfamily.us/login/", "username", "password")
"""
def login(self, login_url, username, password):
cookies = HTTPCookieProcessor()
opener = build_opener(cookies)
install_opener(opener)
opener.open(login_url)
try:
self.token = [x.value for x in cookies.cookiejar if x.name == 'csrftoken'][0]
except IndexError:
return Exception("no csrftoken")
params = dict(username=username,
password=password,
next="/",
csrfmiddlewaretoken=self.token,
)
        login_data = urlencode(params).encode('utf-8')  # urlopen() expects bytes for POST data
request = Request(login_url, login_data)
response = urlopen(request)
if response.geturl() == login_url:
raise Exception("Invalid password")
return response
| gpl-2.0 | 8,125,272,330,686,703,000 | 38.673469 | 89 | 0.649691 | false |
nion-software/nionswift | nion/swift/test/DataGroup_test.py | 1 | 26313 | # standard libraries
import copy
import unittest
# third party libraries
import numpy
# local libraries
from nion.swift import Application
from nion.swift import DocumentController
from nion.swift.model import DataGroup
from nion.swift.model import DataItem
from nion.swift.model import Persistence
from nion.swift.test import TestContext
from nion.ui import TestUI
class TestDataGroupClass(unittest.TestCase):
def setUp(self):
TestContext.begin_leaks()
self.app = Application.Application(TestUI.UserInterface(), set_global=False)
def tearDown(self):
TestContext.end_leaks(self)
def test_deep_copy_should_deep_copy_child_data_groups(self):
with TestContext.create_memory_context() as test_context:
document_model = test_context.create_document_model()
data_group = DataGroup.DataGroup()
document_model.append_data_group(data_group)
data_item1 = DataItem.DataItem(numpy.zeros((8, 8), numpy.uint32))
document_model.append_data_item(data_item1)
data_group.append_display_item(document_model.get_display_item_for_data_item(data_item1))
data_group2 = DataGroup.DataGroup()
data_group.append_data_group(data_group2)
# attempt to copy
data_group_copy = copy.deepcopy(data_group)
document_model.append_data_group(data_group_copy)
# make sure data_groups are not shared
self.assertNotEqual(data_group.data_groups[0], data_group_copy.data_groups[0])
def test_deep_copy_should_not_deep_copy_data_items(self):
with TestContext.create_memory_context() as test_context:
document_model = test_context.create_document_model()
data_group = DataGroup.DataGroup()
document_model.append_data_group(data_group)
data_item = DataItem.DataItem(numpy.zeros((8, 8), numpy.uint32))
document_model.append_data_item(data_item)
display_item = document_model.get_display_item_for_data_item(data_item)
data_group.append_display_item(display_item)
data_group_copy = copy.deepcopy(data_group)
document_model.append_data_group(data_group_copy)
display_item_specifier = Persistence.PersistentObjectSpecifier.read(data_group_copy.display_item_specifiers[0])
self.assertEqual(display_item, document_model.resolve_item_specifier(display_item_specifier))
def test_counted_display_items(self):
# TODO: split test_counted_display_items into separate tests
with TestContext.create_memory_context() as test_context:
document_model = test_context.create_document_model()
data_group = DataGroup.DataGroup()
document_model.append_data_group(data_group)
self.assertEqual(len(data_group.counted_display_items), 0)
self.assertEqual(len(document_model.data_items), 0)
data_item1 = DataItem.DataItem(numpy.zeros((8, 8), numpy.uint32))
document_model.append_data_item(data_item1)
display_item1 = document_model.get_display_item_for_data_item(data_item1)
data_group.append_display_item(display_item1)
# make sure that both top level and data_group see the data item
self.assertEqual(len(document_model.data_items), 1)
self.assertEqual(len(data_group.counted_display_items), 1)
self.assertEqual(list(document_model.display_items), list(data_group.counted_display_items))
self.assertIn(display_item1, list(data_group.counted_display_items.keys()))
# add a child data item and make sure top level and data_group see it
# also check data item.
data_item1a = document_model.get_resample_new(display_item1, display_item1.data_item)
display_item1a = document_model.get_display_item_for_data_item(data_item1a)
data_group.append_display_item(display_item1a)
self.assertEqual(len(document_model.data_items), 2)
self.assertEqual(len(data_group.counted_display_items), 2)
self.assertIn(display_item1, list(data_group.counted_display_items.keys()))
self.assertIn(display_item1a, list(data_group.counted_display_items.keys()))
# add a child data item to the child and make sure top level and data_group match.
# also check data items.
data_item1a1 = document_model.get_resample_new(display_item1a, display_item1a.data_item)
display_item1a1 = document_model.get_display_item_for_data_item(data_item1a1)
data_group.append_display_item(display_item1a1)
self.assertEqual(len(document_model.data_items), 3)
self.assertEqual(len(data_group.counted_display_items), 3)
self.assertIn(display_item1, list(data_group.counted_display_items.keys()))
self.assertIn(display_item1a, list(data_group.counted_display_items.keys()))
self.assertIn(display_item1a1, list(data_group.counted_display_items.keys()))
# now add a data item that already has children
data_item2 = DataItem.DataItem(numpy.zeros((8, 8), numpy.uint32))
document_model.append_data_item(data_item2)
display_item2 = document_model.get_display_item_for_data_item(data_item2)
data_item2a = document_model.get_resample_new(display_item2, display_item2.data_item)
display_item2a = document_model.get_display_item_for_data_item(data_item2a)
data_group.append_display_item(display_item2)
data_group.append_display_item(display_item2a)
self.assertEqual(len(document_model.data_items), 5)
self.assertEqual(len(data_group.counted_display_items), 5)
self.assertIn(data_item2a, document_model.data_items)
self.assertIn(display_item2a, list(data_group.counted_display_items.keys()))
# remove data item without children
document_model.remove_data_item(data_item1a1)
self.assertEqual(len(document_model.data_items), 4)
self.assertEqual(len(data_group.counted_display_items), 4)
# now remove data item with children
document_model.remove_data_item(data_item2)
self.assertEqual(len(document_model.data_items), 2)
self.assertEqual(len(data_group.counted_display_items), 2)
def test_deleting_data_item_removes_it_from_data_group(self):
with TestContext.create_memory_context() as test_context:
document_model = test_context.create_document_model()
data_item = DataItem.DataItem(numpy.random.randn(4, 4))
document_model.append_data_item(data_item)
data_group = DataGroup.DataGroup()
data_group.append_display_item(document_model.get_display_item_for_data_item(data_item))
document_model.append_data_group(data_group)
self.assertEqual(1, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
document_model.remove_data_item(data_item)
self.assertEqual(0, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
def test_deleting_data_items_from_data_group_undo_redo(self):
with TestContext.create_memory_context() as test_context:
document_controller = test_context.create_document_controller()
document_model = document_controller.document_model
# setup three data items in a group
data_item1 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item2 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item3 = DataItem.DataItem(numpy.random.randn(4, 4))
document_model.append_data_item(data_item1)
document_model.append_data_item(data_item2)
document_model.append_data_item(data_item3)
data_group = DataGroup.DataGroup()
display_item1 = document_model.get_display_item_for_data_item(data_item1)
display_item2 = document_model.get_display_item_for_data_item(data_item2)
display_item3 = document_model.get_display_item_for_data_item(data_item3)
data_group.append_display_item(display_item1)
data_group.append_display_item(display_item2)
data_group.append_display_item(display_item3)
document_model.append_data_group(data_group)
# remove two of the three
command = DocumentController.DocumentController.RemoveDataGroupDisplayItemsCommand(document_model, data_group, [display_item1, display_item3])
command.perform()
document_controller.push_undo_command(command)
self.assertEqual(1, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
self.assertEqual(display_item2, data_group.display_items[0])
# undo and check
document_controller.handle_undo()
self.assertEqual(3, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
self.assertEqual(display_item1, data_group.display_items[0])
self.assertEqual(display_item2, data_group.display_items[1])
self.assertEqual(display_item3, data_group.display_items[2])
# redo and check
document_controller.handle_redo()
self.assertEqual(1, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
self.assertEqual(display_item2, data_group.display_items[0])
def test_deleting_data_items_out_of_order_from_data_group_undo_redo(self):
with TestContext.create_memory_context() as test_context:
document_controller = test_context.create_document_controller()
document_model = document_controller.document_model
# setup three data items in a group
data_item1 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item2 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item3 = DataItem.DataItem(numpy.random.randn(4, 4))
document_model.append_data_item(data_item1)
document_model.append_data_item(data_item2)
document_model.append_data_item(data_item3)
data_group = DataGroup.DataGroup()
display_item1 = document_model.get_display_item_for_data_item(data_item1)
display_item2 = document_model.get_display_item_for_data_item(data_item2)
display_item3 = document_model.get_display_item_for_data_item(data_item3)
data_group.append_display_item(display_item1)
data_group.append_display_item(display_item2)
data_group.append_display_item(display_item3)
document_model.append_data_group(data_group)
# remove two of the three
command = DocumentController.DocumentController.RemoveDataGroupDisplayItemsCommand(document_model, data_group, [display_item3, display_item1])
command.perform()
document_controller.push_undo_command(command)
self.assertEqual(1, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
self.assertEqual(display_item2, data_group.display_items[0])
# undo and check
document_controller.handle_undo()
self.assertEqual(3, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
self.assertEqual(display_item1, data_group.display_items[0])
self.assertEqual(display_item2, data_group.display_items[1])
self.assertEqual(display_item3, data_group.display_items[2])
# redo and check
document_controller.handle_redo()
self.assertEqual(1, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
self.assertEqual(display_item2, data_group.display_items[0])
def test_insert_data_item_into_data_group_undo_redo(self):
with TestContext.create_memory_context() as test_context:
document_controller = test_context.create_document_controller()
document_model = document_controller.document_model
# setup three data items and put two in a group
data_item1 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item2 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item3 = DataItem.DataItem(numpy.random.randn(4, 4))
document_model.append_data_item(data_item1)
document_model.append_data_item(data_item2)
document_model.append_data_item(data_item3)
data_group = DataGroup.DataGroup()
display_item1 = document_model.get_display_item_for_data_item(data_item1)
display_item2 = document_model.get_display_item_for_data_item(data_item2)
display_item3 = document_model.get_display_item_for_data_item(data_item3)
data_group.append_display_item(display_item1)
data_group.append_display_item(display_item3)
document_model.append_data_group(data_group)
# insert a new one
display_item2 = document_model.get_display_item_for_data_item(data_item2)
command = document_controller.create_insert_data_group_display_items_command(data_group, 1, [display_item2])
command.perform()
document_controller.push_undo_command(command)
self.assertEqual(3, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
self.assertEqual(display_item1, data_group.display_items[0])
self.assertEqual(display_item2, data_group.display_items[1])
self.assertEqual(display_item3, data_group.display_items[2])
# undo and check
document_controller.handle_undo()
self.assertEqual(2, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
self.assertEqual(display_item1, data_group.display_items[0])
self.assertEqual(display_item3, data_group.display_items[1])
# redo and check
document_controller.handle_redo()
self.assertEqual(3, len(data_group.display_items))
self.assertEqual(0, len(data_group.data_groups))
self.assertEqual(display_item1, data_group.display_items[0])
self.assertEqual(display_item2, data_group.display_items[1])
self.assertEqual(display_item3, data_group.display_items[2])
def test_insert_data_group_undo_redo(self):
with TestContext.create_memory_context() as test_context:
document_controller = test_context.create_document_controller()
document_model = document_controller.document_model
# setup three data items and put two in a group
data_item1 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item3 = DataItem.DataItem(numpy.random.randn(4, 4))
document_model.append_data_item(data_item1)
document_model.append_data_item(data_item3)
data_group1 = DataGroup.DataGroup()
data_group1.append_display_item(document_model.get_display_item_for_data_item(data_item1))
data_group2 = DataGroup.DataGroup()
data_group3 = DataGroup.DataGroup()
data_group2_uuid = data_group2.uuid
data_group3.append_display_item(document_model.get_display_item_for_data_item(data_item3))
document_model.append_data_group(data_group1)
document_model.append_data_group(data_group3)
# insert the middle data group
command = DocumentController.DocumentController.InsertDataGroupCommand(document_model, document_model._project, 1, data_group2)
command.perform()
document_controller.push_undo_command(command)
self.assertEqual(3, len(document_model.data_groups))
self.assertEqual(data_group1, document_model.data_groups[0])
self.assertEqual(data_group3, document_model.data_groups[2])
self.assertEqual(data_group2_uuid, document_model.data_groups[1].uuid)
# undo and check
document_controller.handle_undo()
self.assertEqual(2, len(document_model.data_groups))
self.assertEqual(data_group1, document_model.data_groups[0])
self.assertEqual(data_group3, document_model.data_groups[1])
# redo and check
document_controller.handle_redo()
self.assertEqual(3, len(document_model.data_groups))
self.assertEqual(data_group1, document_model.data_groups[0])
self.assertEqual(data_group3, document_model.data_groups[2])
self.assertEqual(data_group2_uuid, document_model.data_groups[1].uuid)
def test_remove_data_group_undo_redo(self):
with TestContext.create_memory_context() as test_context:
document_controller = test_context.create_document_controller()
document_model = document_controller.document_model
# setup three data items and put two in a group
data_item1 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item3 = DataItem.DataItem(numpy.random.randn(4, 4))
document_model.append_data_item(data_item1)
document_model.append_data_item(data_item3)
data_group1 = DataGroup.DataGroup()
data_group1.append_display_item(document_model.get_display_item_for_data_item(data_item1))
data_group2 = DataGroup.DataGroup()
data_group3 = DataGroup.DataGroup()
data_group2_uuid = data_group2.uuid
data_group3.append_display_item(document_model.get_display_item_for_data_item(data_item3))
document_model.append_data_group(data_group1)
document_model.append_data_group(data_group2)
document_model.append_data_group(data_group3)
# remove the middle data group
command = DocumentController.DocumentController.RemoveDataGroupCommand(document_model, document_model._project, data_group2)
command.perform()
document_controller.push_undo_command(command)
self.assertEqual(2, len(document_model.data_groups))
self.assertEqual(data_group1, document_model.data_groups[0])
self.assertEqual(data_group3, document_model.data_groups[1])
# undo and check
document_controller.handle_undo()
self.assertEqual(3, len(document_model.data_groups))
self.assertEqual(data_group1, document_model.data_groups[0])
self.assertEqual(data_group3, document_model.data_groups[2])
self.assertEqual(data_group2_uuid, document_model.data_groups[1].uuid)
# redo and check
document_controller.handle_redo()
self.assertEqual(2, len(document_model.data_groups))
self.assertEqual(data_group1, document_model.data_groups[0])
self.assertEqual(data_group3, document_model.data_groups[1])
def test_data_group_rename_undo_redo(self):
with TestContext.create_memory_context() as test_context:
document_controller = test_context.create_document_controller()
document_model = document_controller.document_model
# setup data group
data_group = DataGroup.DataGroup()
data_group.title = "ethel"
document_model.append_data_group(data_group)
# rename
command = document_controller.create_rename_data_group_command(data_group, "fred")
command.perform()
document_controller.push_undo_command(command)
self.assertEqual(data_group.title, "fred")
# undo and check
document_controller.handle_undo()
self.assertEqual(data_group.title, "ethel")
# redo and check
document_controller.handle_redo()
self.assertEqual(data_group.title, "fred")
def test_data_item_removed_implicitly_from_data_group_undo_redo(self):
with TestContext.create_memory_context() as test_context:
document_controller = test_context.create_document_controller()
document_model = document_controller.document_model
# setup three data items and put in a group
data_item1 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item2 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item3 = DataItem.DataItem(numpy.random.randn(4, 4))
document_model.append_data_item(data_item1)
document_model.append_data_item(data_item2)
document_model.append_data_item(data_item3)
data_group = DataGroup.DataGroup()
display_item1 = document_model.get_display_item_for_data_item(data_item1)
display_item2 = document_model.get_display_item_for_data_item(data_item2)
display_item3 = document_model.get_display_item_for_data_item(data_item3)
data_group.append_display_item(display_item1)
data_group.append_display_item(display_item2)
data_group.append_display_item(display_item3)
document_model.append_data_group(data_group)
# remove the 2nd data item
command = document_controller.create_remove_data_items_command([data_item2])
command.perform()
document_controller.push_undo_command(command)
self.assertEqual(2, len(document_model.data_items))
self.assertEqual(2, len(data_group.display_items))
self.assertEqual(document_model.display_items[0], data_group.display_items[0])
self.assertEqual(document_model.display_items[1], data_group.display_items[1])
# undo and check
document_controller.handle_undo()
self.assertEqual(3, len(document_model.data_items))
self.assertEqual(3, len(data_group.display_items))
self.assertEqual(document_model.display_items[0], data_group.display_items[0])
self.assertEqual(document_model.display_items[1], data_group.display_items[1])
self.assertEqual(document_model.display_items[2], data_group.display_items[2])
# redo and check
document_controller.handle_redo()
self.assertEqual(2, len(document_model.data_items))
self.assertEqual(2, len(data_group.display_items))
self.assertEqual(document_model.display_items[0], data_group.display_items[0])
self.assertEqual(document_model.display_items[1], data_group.display_items[1])
def test_inserting_items_data_group_undo_redo(self):
with TestContext.create_memory_context() as test_context:
document_controller = test_context.create_document_controller()
document_model = document_controller.document_model
# setup three data items and put in a group
data_item1 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item3 = DataItem.DataItem(numpy.random.randn(4, 4))
data_item4 = DataItem.DataItem(numpy.random.randn(4, 4))
document_model.append_data_item(data_item1)
document_model.append_data_item(data_item3)
document_model.append_data_item(data_item4)
data_group = DataGroup.DataGroup()
display_item1 = document_model.get_display_item_for_data_item(data_item1)
display_item3 = document_model.get_display_item_for_data_item(data_item3)
display_item4 = document_model.get_display_item_for_data_item(data_item4)
data_group.append_display_item(display_item1)
data_group.append_display_item(display_item3)
document_model.append_data_group(data_group)
self.assertEqual(3, len(document_model.data_items))
self.assertEqual(2, len(data_group.display_items))
self.assertListEqual([data_item1, data_item3, data_item4], list(document_model.data_items))
self.assertListEqual([display_item1, display_item3], list(data_group.display_items))
# insert new items
data_item2 = DataItem.DataItem(numpy.random.randn(4, 4))
document_model.append_data_item(data_item2)
display_item2 = document_model.get_display_item_for_data_item(data_item2)
            # insert both new display items with a single InsertDataGroupDisplayItemsCommand
command = DocumentController.DocumentController.InsertDataGroupDisplayItemsCommand(document_controller.document_model, data_group, 1, [display_item2, display_item4])
command.perform()
document_controller.push_undo_command(command)
self.assertEqual(4, len(document_model.data_items))
self.assertEqual(4, len(data_group.display_items))
self.assertListEqual([data_item1, data_item3, data_item4, data_item2], list(document_model.data_items))
self.assertListEqual([display_item1, display_item2, display_item4, display_item3], list(data_group.display_items))
# undo and check
document_controller.handle_undo()
self.assertEqual(4, len(document_model.data_items))
self.assertEqual(2, len(data_group.display_items))
self.assertListEqual([data_item1, data_item3, data_item4, data_item2], list(document_model.data_items))
self.assertListEqual([display_item1, display_item3], list(data_group.display_items))
# redo and check
document_controller.handle_redo()
data_item2 = document_model.data_items[3]
self.assertEqual(4, len(document_model.data_items))
self.assertEqual(4, len(data_group.display_items))
self.assertListEqual([data_item1, data_item3, data_item4, data_item2], list(document_model.data_items))
self.assertListEqual([display_item1, display_item2, display_item4, display_item3], list(data_group.display_items))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -3,856,847,269,952,720,000 | 59.769053 | 177 | 0.65861 | false |
cloudwatt/contrail-api-cli-extra | contrail_api_cli_extra/provision/linklocal.py | 1 | 5991 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from contrail_api_cli.command import Command, Option
from contrail_api_cli.resource import Resource
from contrail_api_cli.exceptions import CommandError, ResourceNotFound
from ..utils import ip_type
class Linklocal(Command):
service_name = Option(help='Linklocal service name',
required=True)
service_ip = Option(help='Linklocal service IP',
type=ip_type,
required=True)
service_port = Option(help='Linklocal service port',
type=int,
required=True)
fabric_dns_service_name = Option(help='DNS service name in the fabric')
fabric_service_ip = Option(help='Service IP in the fabric',
type=ip_type)
fabric_service_port = Option(help='Service port in the fabric',
type=int,
required=True)
def __call__(self, service_name=None, service_ip=None, service_port=None,
fabric_dns_service_name=None, fabric_service_ip=None, fabric_service_port=None):
if not fabric_dns_service_name and not fabric_service_ip:
raise CommandError('--fabric_dns_service_name or --fabric_service_ip required')
self.linklocal_entry = {
'ip_fabric_DNS_service_name': fabric_dns_service_name,
'ip_fabric_service_ip': [],
'ip_fabric_service_port': fabric_service_port,
'linklocal_service_ip': service_ip,
'linklocal_service_port': service_port,
'linklocal_service_name': service_name
}
if fabric_service_ip:
self.linklocal_entry['ip_fabric_service_ip'].append(fabric_service_ip)
try:
self.vrouter_config = Resource('global-vrouter-config',
fq_name='default-global-system-config:default-global-vrouter-config',
fetch=True)
except ResourceNotFound:
global_config = Resource('global-system-config',
fq_name='default-global-system-config')
self.vrouter_config = Resource('global-vrouter-config',
fq_name='default-global-system-config:default-global-vrouter-config',
parent=global_config)
self.vrouter_config.save()
class AddLinklocal(Linklocal):
description = 'Add linklocal service'
def __call__(self, service_name=None, service_ip=None, service_port=None,
fabric_dns_service_name=None, fabric_service_ip=None, fabric_service_port=None):
super(AddLinklocal, self).__call__(service_name, service_ip, service_port,
fabric_dns_service_name, fabric_service_ip, fabric_service_port)
if 'linklocal_services' not in self.vrouter_config:
self.vrouter_config['linklocal_services'] = {}
if 'linklocal_service_entry' not in self.vrouter_config['linklocal_services']:
self.vrouter_config['linklocal_services']['linklocal_service_entry'] = []
if self.linklocal_entry not in self.vrouter_config['linklocal_services']['linklocal_service_entry']:
self.vrouter_config['linklocal_services']['linklocal_service_entry'].append(self.linklocal_entry)
else:
raise CommandError('Linklocal service already present')
self.vrouter_config.save()
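# Usage sketch (assumption: contrail-api-cli exposes this class as an
# "add-linklocal" command and keeps the underscore option names used in the
# error message above; the addresses/ports are illustrative only):
#     contrail-api-cli add-linklocal --service_name metadata \
#         --service_ip 169.254.169.254 --service_port 80 \
#         --fabric_service_ip 10.0.0.2 --fabric_service_port 8775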
class DelLinklocal(Linklocal):
description = 'Remove linklocal service'
def __call__(self, service_name=None, service_ip=None, service_port=None,
fabric_dns_service_name=None, fabric_service_ip=None, fabric_service_port=None):
super(DelLinklocal, self).__call__(service_name, service_ip, service_port,
fabric_dns_service_name, fabric_service_ip, fabric_service_port)
if 'linklocal_services' not in self.vrouter_config:
raise CommandError('Linklocal service not found')
if 'linklocal_service_entry' not in self.vrouter_config['linklocal_services']:
raise CommandError('Linklocal service not found')
try:
self.vrouter_config['linklocal_services']['linklocal_service_entry'].remove(self.linklocal_entry)
except ValueError:
raise CommandError('Linklocal service not found')
self.vrouter_config.save()
class ListLinklocal(Command):
description = 'List linklocal services'
def __call__(self):
try:
vrouter_config = Resource('global-vrouter-config',
fq_name='default-global-system-config:default-global-vrouter-config',
fetch=True)
if 'linklocal_services' in vrouter_config:
linklocal_list = []
for service in vrouter_config['linklocal_services'].get('linklocal_service_entry', []):
linklocal_info = {
'service_name': service['linklocal_service_name'],
'service_ip': service['linklocal_service_ip'],
'service_port': service['linklocal_service_port'],
'fabric_service_port': service['ip_fabric_service_port'],
}
if service.get('ip_fabric_DNS_service_name'):
linklocal_info['fabric_dns_service_name'] = service.get('ip_fabric_DNS_service_name')
if service.get('ip_fabric_service_ip'):
linklocal_info['fabric_service_ip'] = service.get('ip_fabric_service_ip')
linklocal_list.append(linklocal_info)
return json.dumps(linklocal_list, indent=2)
except ResourceNotFound:
pass
return json.dumps([])
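# Example output (illustrative values only; assumption: the command is exposed
# as "list-linklocal"):
#     [
#       {
#         "service_name": "metadata",
#         "service_ip": "169.254.169.254",
#         "service_port": 80,
#         "fabric_service_port": 8775,
#         "fabric_service_ip": ["10.0.0.2"]
#       }
#     ]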
| mit | 2,510,622,126,555,327,500 | 48.106557 | 112 | 0.58688 | false |
GLolol/PyLink | plugins/servprotect.py | 1 | 1953 | # servprotect.py: Protects against KILL and nick collision floods
import threading
from pylinkirc import conf, utils
from pylinkirc.log import log
try:
from cachetools import TTLCache
except ImportError:
log.warning('servprotect: expiringdict support is deprecated as of PyLink 3.0; consider installing cachetools instead')
from expiringdict import ExpiringDict as TTLCache
# read the kill/save thresholds from the 'servprotect' configuration block
servprotect_conf = conf.conf.get('servprotect', {})
length = servprotect_conf.get('length', 10)
age = servprotect_conf.get('age', 10)
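# Example configuration (an assumption about the PyLink config file layout; both
# values default to 10 as read above):
#     servprotect:
#         length: 10   # kills/nick collisions tolerated before disconnecting
#         age: 10      # lifetime of each cache entry, in seconds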
def _new_cache_dict():
return TTLCache(length, age)
savecache = _new_cache_dict()
killcache = _new_cache_dict()
lock = threading.Lock()
def handle_kill(irc, numeric, command, args):
"""
Tracks kills against PyLink clients. If too many are received,
automatically disconnects from the network.
"""
if (args['userdata'] and irc.is_internal_server(args['userdata'].server)) or irc.is_internal_client(args['target']):
with lock:
if killcache.setdefault(irc.name, 1) >= length:
log.error('(%s) servprotect: Too many kills received, aborting!', irc.name)
irc.disconnect()
log.debug('(%s) servprotect: Incrementing killcache by 1', irc.name)
killcache[irc.name] += 1
utils.add_hook(handle_kill, 'KILL')
def handle_save(irc, numeric, command, args):
"""
Tracks SAVEs (nick collision) against PyLink clients. If too many are received,
automatically disconnects from the network.
"""
if irc.is_internal_client(args['target']):
with lock:
if savecache.setdefault(irc.name, 0) >= length:
log.error('(%s) servprotect: Too many nick collisions, aborting!', irc.name)
irc.disconnect()
log.debug('(%s) servprotect: Incrementing savecache by 1', irc.name)
savecache[irc.name] += 1
utils.add_hook(handle_save, 'SAVE')
| mpl-2.0 | -1,154,787,156,643,393,300 | 33.263158 | 123 | 0.670763 | false |
michaelkourlas/gini | frontend/src/gbuilder/Core/Connection.py | 1 | 2255 | """The logical connection object that links two devices together"""
from Devices.Bridge import *
from Devices.Firewall import *
from Devices.Hub import *
from Devices.Mobile import *
from Devices.Router import *
from Devices.Subnet import *
from Devices.Switch import *
from Devices.UML import *
from Devices.UML_Android import *
from Devices.UML_FreeDOS import *
from Devices.Wireless_access_point import *
from UI.Edge import *
from Devices.REALM import *
from Devices.OpenFlow_Controller import *
# The connection rules for building topologies
connection_rule={}
connection_rule[UML.device_type]=(Switch.device_type, Subnet.device_type, Bridge.device_type, Hub.device_type)
connection_rule[UML_Android.device_type]=connection_rule[UML.device_type]
connection_rule[UML_FreeDOS.device_type]=connection_rule[UML.device_type]
connection_rule[Router.device_type]=(Subnet.device_type, OpenFlow_Controller.device_type)
connection_rule[Switch.device_type]=(UML.device_type, Subnet.device_type, Switch.device_type, REALM.device_type)
connection_rule[Bridge.device_type]=(UML.device_type, Subnet.device_type, REALM.device_type)
connection_rule[Hub.device_type]=(UML.device_type, Subnet.device_type, REALM.device_type)
connection_rule[Wireless_access_point.device_type]=(Mobile.device_type,)
connection_rule[Subnet.device_type]=(UML.device_type, Switch.device_type, Router.device_type, Bridge.device_type, Hub.device_type, Firewall.device_type, REALM.device_type)
connection_rule[Mobile.device_type]=(Wireless_access_point.device_type,)
connection_rule[Firewall.device_type]=(Subnet.device_type,)
connection_rule[REALM.device_type]=(Switch.device_type, Subnet.device_type, Bridge.device_type, Hub.device_type)
connection_rule[OpenFlow_Controller.device_type]=(Router.device_type,)
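# A minimal sketch (not part of the original module) of how these rules might be
# consulted before creating a link between two devices:
#     def can_connect(source, dest):
#         return dest.device_type in connection_rule.get(source.device_type, ())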
class Connection(Edge):
device_type = "Connection"
def __init__(self, source, dest):
"""
Create a connection to link devices together.
"""
Edge.__init__(self, source, dest)
def getOtherDevice(self, node):
"""
Retrieve the device opposite to node from this connection.
"""
if self.source == node:
return self.dest
return self.source
| mit | 5,052,709,351,077,203,000 | 44.020408 | 171 | 0.735698 | false |
robertdfrench/ghost-ci | cloud_init.py | 1 | 1535 | #!/usr/bin/python2.7
import subprocess
import atexit
import time
import boto3
def shell(command):
subprocess.call(command, shell=True)
class MessageQueue(object):
def __init__(self, name):
        self.q = boto3.resource("sqs").get_queue_by_name(QueueName=name)
    def __iter__(self):
        # Yield each queued command body, deleting the message once it has
        # been handed to the caller.
        for message in self.q.receive_messages():
            yield message.body
            message.delete()
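# Usage sketch (assumption: the named queue already exists and boto3 can resolve
# credentials/region from the instance environment):
#     for command in MessageQueue("ghost_ci_commands.fifo"):
#         shell(command)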
class ImpatientHaltingWorker(object):
'''Halts if it becomes impatient while waiting for work'''
def __init__(self, f, work_items, patience=300):
self.f = f
self.work_items = work_items
self.patience = patience
self.mark_time()
def mark_time(self):
self.t = time.time()
@property
def impatient(self):
return self.duration_of_wait > self.patience
@property
def duration_of_wait(self):
return time.time() - self.t
def process_work(self):
for x in self.work_items:
self.f(x)
self.mark_time()
def start(self):
        while not self.impatient:
self.process_work()
time.sleep(10)
def impatient_map(f, domain):
ImpatientHaltingWorker(f, domain).start()
if __name__ == "__main__":
atexit.register(subprocess.call, "halt", shell=True)
shell("yum -y update")
shell("yum -y install git docker")
shell("service docker start")
impatient_map(shell, MessageQueue("ghost_ci_commands.fifo"))
| mit | -7,347,001,797,388,041,000 | 22.615385 | 64 | 0.605863 | false |
CybOXProject/python-cybox | cybox/bindings/win_file_object.py | 1 | 40904 | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import sys
from mixbox.binding_utils import *
from . import cybox_common
from . import file_object
class StreamListType(GeneratedsSuper):
"""The StreamListType type specifies a list of NTFS alternate data
streams."""
subclass = None
superclass = None
def __init__(self, Stream=None):
if Stream is None:
self.Stream = []
else:
self.Stream = Stream
def factory(*args_, **kwargs_):
if StreamListType.subclass:
return StreamListType.subclass(*args_, **kwargs_)
else:
return StreamListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Stream(self): return self.Stream
def set_Stream(self, Stream): self.Stream = Stream
def add_Stream(self, value): self.Stream.append(value)
def insert_Stream(self, index, value): self.Stream[index] = value
def hasContent_(self):
if (
self.Stream
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinFileObj:', name_='StreamListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='StreamListType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinFileObj:', name_='StreamListType'):
pass
def exportChildren(self, lwrite, level, namespace_='WinFileObj:', name_='StreamListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Stream_ in self.Stream:
Stream_.export(lwrite, level, 'WinFileObj:', name_='Stream', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Stream':
obj_ = StreamObjectType.factory()
obj_.build(child_)
self.Stream.append(obj_)
# end class StreamListType
class WindowsFilePermissionsType(file_object.FilePermissionsType):
"""The WindowsFilePermissionsType type specifies Windows file
permissions. It imports and extends the file_object.FilePermissionsType from
the CybOX File Object."""
subclass = None
superclass = file_object.FilePermissionsType
def __init__(self, Full_Control=None, Modify=None, Read=None, Read_And_Execute=None, Write=None):
super(WindowsFilePermissionsType, self).__init__()
self.Full_Control = Full_Control
self.Modify = Modify
self.Read = Read
self.Read_And_Execute = Read_And_Execute
self.Write = Write
def factory(*args_, **kwargs_):
if WindowsFilePermissionsType.subclass:
return WindowsFilePermissionsType.subclass(*args_, **kwargs_)
else:
return WindowsFilePermissionsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Full_Control(self): return self.Full_Control
def set_Full_Control(self, Full_Control): self.Full_Control = Full_Control
def get_Modify(self): return self.Modify
def set_Modify(self, Modify): self.Modify = Modify
def get_Read(self): return self.Read
def set_Read(self, Read): self.Read = Read
def get_Read_And_Execute(self): return self.Read_And_Execute
def set_Read_And_Execute(self, Read_And_Execute): self.Read_And_Execute = Read_And_Execute
def get_Write(self): return self.Write
def set_Write(self, Write): self.Write = Write
def hasContent_(self):
if (
self.Full_Control is not None or
self.Modify is not None or
self.Read is not None or
self.Read_And_Execute is not None or
self.Write is not None or
super(WindowsFilePermissionsType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinFileObj:', name_='WindowsFilePermissionsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsFilePermissionsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinFileObj:', name_='WindowsFilePermissionsType'):
super(WindowsFilePermissionsType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsFilePermissionsType')
def exportChildren(self, lwrite, level, namespace_='WinFileObj:', name_='WindowsFilePermissionsType', fromsubclass_=False, pretty_print=True):
super(WindowsFilePermissionsType, self).exportChildren(lwrite, level, 'WinFileObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Full_Control is not None:
showIndent(lwrite, level, pretty_print)
lwrite('<%sFull_Control>%s</%sFull_Control>%s' % ('WinFileObj:', self.gds_format_boolean(self.Full_Control, input_name='Full_Control'), 'WinFileObj:', eol_))
if self.Modify is not None:
showIndent(lwrite, level, pretty_print)
lwrite('<%sModify>%s</%sModify>%s' % ('WinFileObj:', self.gds_format_boolean(self.Modify, input_name='Modify'), 'WinFileObj:', eol_))
if self.Read is not None:
showIndent(lwrite, level, pretty_print)
lwrite('<%sRead>%s</%sRead>%s' % ('WinFileObj:', self.gds_format_boolean(self.Read, input_name='Read'), 'WinFileObj:', eol_))
if self.Read_And_Execute is not None:
showIndent(lwrite, level, pretty_print)
lwrite('<%sRead_And_Execute>%s</%sRead_And_Execute>%s' % ('WinFileObj:', self.gds_format_boolean(self.Read_And_Execute, input_name='Read_And_Execute'), 'WinFileObj:', eol_))
if self.Write is not None:
showIndent(lwrite, level, pretty_print)
lwrite('<%sWrite>%s</%sWrite>%s' % ('WinFileObj:', self.gds_format_boolean(self.Write, input_name='Write'), 'WinFileObj:', eol_))
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(WindowsFilePermissionsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Full_Control':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'Full_Control')
self.Full_Control = ival_
elif nodeName_ == 'Modify':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'Modify')
self.Modify = ival_
elif nodeName_ == 'Read':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'Read')
self.Read = ival_
elif nodeName_ == 'Read_And_Execute':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'Read_And_Execute')
self.Read_And_Execute = ival_
elif nodeName_ == 'Write':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'Write')
self.Write = ival_
super(WindowsFilePermissionsType, self).buildChildren(child_, node, nodeName_, True)
# end class WindowsFilePermissionsType
class WindowsFileAttributeType(cybox_common.BaseObjectPropertyType):
"""WindowsFileAttributeType specifies Windows file attributes via a
union of the FileAttributesEnum type and the atomic xs:string
type. Its base type is the CybOX Core cybox_common.BaseObjectPropertyType,
for permitting complex (i.e. regular-expression based)
specifications.This attribute is optional and specifies the
expected type for the value of the specified property."""
subclass = None
superclass = cybox_common.BaseObjectPropertyType
def __init__(self, obfuscation_algorithm_ref=None, refanging_transform_type=None, has_changed=None, delimiter='##comma##', pattern_type=None, datatype='string', refanging_transform=None, is_case_sensitive=True, bit_mask=None, appears_random=None, observed_encoding=None, defanging_algorithm_ref=None, is_obfuscated=None, regex_syntax=None, apply_condition='ANY', trend=None, idref=None, is_defanged=None, id=None, condition=None, valueOf_=None):
super(WindowsFileAttributeType, self).__init__(obfuscation_algorithm_ref, refanging_transform_type, has_changed, delimiter, pattern_type, datatype, refanging_transform, is_case_sensitive, bit_mask, appears_random, observed_encoding, defanging_algorithm_ref, is_obfuscated, regex_syntax, apply_condition, trend, idref, is_defanged, id, condition, valueOf_)
self.datatype = _cast(None, datatype)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if WindowsFileAttributeType.subclass:
return WindowsFileAttributeType.subclass(*args_, **kwargs_)
else:
return WindowsFileAttributeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_datatype(self): return self.datatype
def set_datatype(self, datatype): self.datatype = datatype
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_ or
super(WindowsFileAttributeType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinFileObj:', name_='WindowsFileAttributeType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsFileAttributeType')
if self.hasContent_():
lwrite('>')
lwrite(quote_xml(self.valueOf_))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinFileObj:', name_='WindowsFileAttributeType'):
super(WindowsFileAttributeType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsFileAttributeType')
if self.datatype is not None:
lwrite(' datatype=%s' % (quote_attrib(self.datatype), ))
def exportChildren(self, lwrite, level, namespace_='WinFileObj:', name_='WindowsFileAttributeType', fromsubclass_=False, pretty_print=True):
super(WindowsFileAttributeType, self).exportChildren(lwrite, level, 'WinFileObj:', name_, True, pretty_print=pretty_print)
pass
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('datatype', node)
if value is not None:
self.datatype = value
super(WindowsFileAttributeType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class WindowsFileAttributeType
class WindowsFileAttributesType(file_object.FileAttributeType):
"""The WindowsFileAttributesType type specifies Windows file
attributes. It imports and extends the file_object.FileAttributeType from
the CybOX File Object."""
subclass = None
superclass = file_object.FileAttributeType
def __init__(self, Attribute=None):
super(WindowsFileAttributesType, self).__init__()
if Attribute is None:
self.Attribute = []
else:
self.Attribute = Attribute
def factory(*args_, **kwargs_):
if WindowsFileAttributesType.subclass:
return WindowsFileAttributesType.subclass(*args_, **kwargs_)
else:
return WindowsFileAttributesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Attribute(self): return self.Attribute
def set_Attribute(self, Attribute): self.Attribute = Attribute
def add_Attribute(self, value): self.Attribute.append(value)
def insert_Attribute(self, index, value): self.Attribute[index] = value
def validate_WindowsFileAttributeType(self, value):
# Validate type WindowsFileAttributeType, a restriction on None.
pass
def hasContent_(self):
if (
self.Attribute or
super(WindowsFileAttributesType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinFileObj:', name_='WindowsFileAttributesType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsFileAttributesType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinFileObj:', name_='WindowsFileAttributesType'):
super(WindowsFileAttributesType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsFileAttributesType')
def exportChildren(self, lwrite, level, namespace_='WinFileObj:', name_='WindowsFileAttributesType', fromsubclass_=False, pretty_print=True):
super(WindowsFileAttributesType, self).exportChildren(lwrite, level, 'WinFileObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Attribute_ in self.Attribute:
Attribute_.export(lwrite, level, 'WinFileObj:', name_='Attribute', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(WindowsFileAttributesType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Attribute':
obj_ = WindowsFileAttributeType.factory()
obj_.build(child_)
self.Attribute.append(obj_)
super(WindowsFileAttributesType, self).buildChildren(child_, node, nodeName_, True)
# end class WindowsFileAttributesType
class StreamObjectType(cybox_common.HashListType):
"""The StreamObjectType type is intended to characterize NTFS alternate
data streams."""
subclass = None
superclass = cybox_common.HashListType
def __init__(self, Hash=None, Name=None, Size_In_Bytes=None):
super(StreamObjectType, self).__init__(Hash, )
self.Name = Name
self.Size_In_Bytes = Size_In_Bytes
def factory(*args_, **kwargs_):
if StreamObjectType.subclass:
return StreamObjectType.subclass(*args_, **kwargs_)
else:
return StreamObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Name(self): return self.Name
def set_Name(self, Name): self.Name = Name
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_Size_In_Bytes(self): return self.Size_In_Bytes
def set_Size_In_Bytes(self, Size_In_Bytes): self.Size_In_Bytes = Size_In_Bytes
def validate_UnsignedLongObjectPropertyType(self, value):
# Validate type cybox_common.UnsignedLongObjectPropertyType, a restriction on None.
pass
def hasContent_(self):
if (
self.Name is not None or
self.Size_In_Bytes is not None or
super(StreamObjectType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinFileObj:', name_='StreamObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='StreamObjectType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinFileObj:', name_='StreamObjectType'):
super(StreamObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='StreamObjectType')
def exportChildren(self, lwrite, level, namespace_='WinFileObj:', name_='StreamObjectType', fromsubclass_=False, pretty_print=True):
super(StreamObjectType, self).exportChildren(lwrite, level, 'WinFileObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Name is not None:
self.Name.export(lwrite, level, 'WinFileObj:', name_='Name', pretty_print=pretty_print)
if self.Size_In_Bytes is not None:
self.Size_In_Bytes.export(lwrite, level, 'WinFileObj:', name_='Size_In_Bytes', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(StreamObjectType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Name':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Name(obj_)
elif nodeName_ == 'Size_In_Bytes':
obj_ = cybox_common.UnsignedLongObjectPropertyType.factory()
obj_.build(child_)
self.set_Size_In_Bytes(obj_)
super(StreamObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class StreamObjectType
class WindowsFileObjectType(file_object.FileObjectType):
"""The WindowsFileObjectType type is intended to characterize Windows
files."""
subclass = None
superclass = file_object.FileObjectType
def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, is_packed=None, File_Name=None, File_Path=None, Device_Path=None, Full_Path=None, File_Extension=None, Size_In_Bytes=None, Magic_Number=None, File_Format=None, Hashes=None, Digital_Signatures=None, Modified_Time=None, Accessed_Time=None, Created_Time=None, File_Attributes_List=None, Permissions=None, User_Owner=None, Packer_List=None, Peak_Entropy=None, Sym_Links=None, Byte_Runs=None, Extracted_Features=None, Filename_Accessed_Time=None, Filename_Created_Time=None, Filename_Modified_Time=None, Drive=None, Security_ID=None, Security_Type=None, Stream_List=None):
super(WindowsFileObjectType, self).__init__(object_reference, Custom_Properties, is_packed, File_Name, File_Path, Device_Path, Full_Path, File_Extension, Size_In_Bytes, Magic_Number, File_Format, Hashes, Digital_Signatures, Modified_Time, Accessed_Time, Created_Time, File_Attributes_List, Permissions, User_Owner, Packer_List, Peak_Entropy, Sym_Links, Byte_Runs, Extracted_Features, )
self.Filename_Accessed_Time = Filename_Accessed_Time
self.Filename_Created_Time = Filename_Created_Time
self.Filename_Modified_Time = Filename_Modified_Time
self.Drive = Drive
self.Security_ID = Security_ID
self.Security_Type = Security_Type
self.Stream_List = Stream_List
def factory(*args_, **kwargs_):
if WindowsFileObjectType.subclass:
return WindowsFileObjectType.subclass(*args_, **kwargs_)
else:
return WindowsFileObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Filename_Accessed_Time(self): return self.Filename_Accessed_Time
def set_Filename_Accessed_Time(self, Filename_Accessed_Time): self.Filename_Accessed_Time = Filename_Accessed_Time
def validate_DateTimeObjectPropertyType(self, value):
# Validate type cybox_common.DateTimeObjectPropertyType, a restriction on None.
pass
def get_Filename_Created_Time(self): return self.Filename_Created_Time
def set_Filename_Created_Time(self, Filename_Created_Time): self.Filename_Created_Time = Filename_Created_Time
def get_Filename_Modified_Time(self): return self.Filename_Modified_Time
def set_Filename_Modified_Time(self, Filename_Modified_Time): self.Filename_Modified_Time = Filename_Modified_Time
def get_Drive(self): return self.Drive
def set_Drive(self, Drive): self.Drive = Drive
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_Security_ID(self): return self.Security_ID
def set_Security_ID(self, Security_ID): self.Security_ID = Security_ID
def get_Security_Type(self): return self.Security_Type
def set_Security_Type(self, Security_Type): self.Security_Type = Security_Type
def validate_SIDType(self, value):
# Validate type cybox_common.SIDType, a restriction on None.
pass
def get_Stream_List(self): return self.Stream_List
def set_Stream_List(self, Stream_List): self.Stream_List = Stream_List
def hasContent_(self):
if (
self.Filename_Accessed_Time is not None or
self.Filename_Created_Time is not None or
self.Filename_Modified_Time is not None or
self.Drive is not None or
self.Security_ID is not None or
self.Security_Type is not None or
self.Stream_List is not None or
super(WindowsFileObjectType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinFileObj:', name_='WindowsFileObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsFileObjectType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinFileObj:', name_='WindowsFileObjectType'):
super(WindowsFileObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsFileObjectType')
def exportChildren(self, lwrite, level, namespace_='WinFileObj:', name_='WindowsFileObjectType', fromsubclass_=False, pretty_print=True):
super(WindowsFileObjectType, self).exportChildren(lwrite, level, 'WinFileObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Filename_Accessed_Time is not None:
self.Filename_Accessed_Time.export(lwrite, level, 'WinFileObj:', name_='Filename_Accessed_Time', pretty_print=pretty_print)
if self.Filename_Created_Time is not None:
self.Filename_Created_Time.export(lwrite, level, 'WinFileObj:', name_='Filename_Created_Time', pretty_print=pretty_print)
if self.Filename_Modified_Time is not None:
self.Filename_Modified_Time.export(lwrite, level, 'WinFileObj:', name_='Filename_Modified_Time', pretty_print=pretty_print)
if self.Drive is not None:
self.Drive.export(lwrite, level, 'WinFileObj:', name_='Drive', pretty_print=pretty_print)
if self.Security_ID is not None:
self.Security_ID.export(lwrite, level, 'WinFileObj:', name_='Security_ID', pretty_print=pretty_print)
if self.Security_Type is not None:
self.Security_Type.export(lwrite, level, 'WinFileObj:', name_='Security_Type', pretty_print=pretty_print)
if self.Stream_List is not None:
self.Stream_List.export(lwrite, level, 'WinFileObj:', name_='Stream_List', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(WindowsFileObjectType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Filename_Accessed_Time':
obj_ = cybox_common.DateTimeObjectPropertyType.factory()
obj_.build(child_)
self.set_Filename_Accessed_Time(obj_)
elif nodeName_ == 'Filename_Created_Time':
obj_ = cybox_common.DateTimeObjectPropertyType.factory()
obj_.build(child_)
self.set_Filename_Created_Time(obj_)
elif nodeName_ == 'Filename_Modified_Time':
obj_ = cybox_common.DateTimeObjectPropertyType.factory()
obj_.build(child_)
self.set_Filename_Modified_Time(obj_)
elif nodeName_ == 'Drive':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Drive(obj_)
elif nodeName_ == 'Security_ID':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Security_ID(obj_)
elif nodeName_ == 'Security_Type':
obj_ = cybox_common.SIDType.factory()
obj_.build(child_)
self.set_Security_Type(obj_)
elif nodeName_ == 'Stream_List':
obj_ = StreamListType.factory()
obj_.build(child_)
self.set_Stream_List(obj_)
super(WindowsFileObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class WindowsFileObjectType
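# Usage sketch (assumption: property values are wrapped in the generated
# cybox_common object property types, following the same pattern as the rest of
# these bindings):
#     win_file = WindowsFileObjectType.factory()
#     drive = cybox_common.StringObjectPropertyType.factory()
#     drive.set_valueOf_('C:')
#     win_file.set_Drive(drive)
#     win_file.export(sys.stdout.write, 0, pretty_print=True)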
GDSClassesMapping = {
'Build_Utility': cybox_common.BuildUtilityType,
'Errors': cybox_common.ErrorsType,
'File_Extension': cybox_common.StringObjectPropertyType,
'Error': cybox_common.ErrorType,
'Opcodes': cybox_common.StringObjectPropertyType,
'Certificate_Issuer': cybox_common.StringObjectPropertyType,
'Metadata': cybox_common.MetadataType,
'Hash': cybox_common.HashType,
'Size_In_Bytes': cybox_common.UnsignedLongObjectPropertyType,
'Information_Source_Type': cybox_common.ControlledVocabularyStringType,
'Block_Hash_Value': cybox_common.HashValueType,
'File_System_Offset': cybox_common.IntegerObjectPropertyType,
'Byte_Runs': cybox_common.ByteRunsType,
'SubDatum': cybox_common.MetadataType,
'Segment_Hash': cybox_common.HashValueType,
'Digital_Signature': cybox_common.DigitalSignatureInfoType,
'Code_Snippets': cybox_common.CodeSnippetsType,
'Value': cybox_common.StringObjectPropertyType,
'Length': cybox_common.IntegerObjectPropertyType,
'Device_Path': cybox_common.StringObjectPropertyType,
'Encoding': cybox_common.ControlledVocabularyStringType,
'Internationalization_Settings': cybox_common.InternationalizationSettingsType,
'Tool_Configuration': cybox_common.ToolConfigurationType,
'Compiler': cybox_common.CompilerType,
'Filename_Created_Time': cybox_common.DateTimeObjectPropertyType,
'Functions': cybox_common.FunctionsType,
'String_Value': cybox_common.StringObjectPropertyType,
'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType,
'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType,
'System': cybox_common.ObjectPropertiesType,
'Platform': cybox_common.PlatformSpecificationType,
'Version': cybox_common.StringObjectPropertyType,
'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType,
'Created_Time': cybox_common.DateTimeObjectPropertyType,
'Type': file_object.PackerClassType,
'Compilers': cybox_common.CompilersType,
'Digital_Signatures': cybox_common.DigitalSignaturesType,
'Tool_Type': cybox_common.ControlledVocabularyStringType,
'String': cybox_common.ExtractedStringType,
'File_Format': cybox_common.StringObjectPropertyType,
'Custom_Properties': cybox_common.CustomPropertiesType,
'Build_Information': cybox_common.BuildInformationType,
'Detected_Entrypoint_Signatures': file_object.EntryPointSignatureListType,
'Tool_Hashes': cybox_common.HashListType,
'File_Path': file_object.FilePathType,
'Entry_Point_Signature': file_object.EntryPointSignatureType,
'Error_Instances': cybox_common.ErrorInstancesType,
'Filename_Modified_Time': cybox_common.DateTimeObjectPropertyType,
'Data_Segment': cybox_common.StringObjectPropertyType,
'Certificate_Subject': cybox_common.StringObjectPropertyType,
'Language': cybox_common.StringObjectPropertyType,
'Signature': cybox_common.StringObjectPropertyType,
'Property': cybox_common.PropertyType,
'Strings': cybox_common.ExtractedStringsType,
'User_Owner': cybox_common.StringObjectPropertyType,
'Contributors': cybox_common.PersonnelType,
'Packer': file_object.PackerType,
'Security_Type': cybox_common.SIDType,
'Reference_Description': cybox_common.StructuredTextType,
'Code_Snippet': cybox_common.ObjectPropertiesType,
'File_Attributes_List': file_object.FileAttributeType,
'Configuration_Settings': cybox_common.ConfigurationSettingsType,
'Simple_Hash_Value': cybox_common.SimpleHashValueType,
'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType,
'Sym_Links': file_object.SymLinksListType,
'Instance': cybox_common.ObjectPropertiesType,
'Packer_List': file_object.PackerListType,
'Import': cybox_common.StringObjectPropertyType,
'Accessed_Time': cybox_common.StringObjectPropertyType,
'Sym_Link': cybox_common.StringObjectPropertyType,
'Identifier': cybox_common.PlatformIdentifierType,
'Tool_Specific_Data': cybox_common.ToolSpecificDataType,
'Execution_Environment': cybox_common.ExecutionEnvironmentType,
'Search_Distance': cybox_common.IntegerObjectPropertyType,
'Dependencies': cybox_common.DependenciesType,
'Offset': cybox_common.IntegerObjectPropertyType,
'Date': cybox_common.DateRangeType,
'Hashes': cybox_common.HashListType,
'Segments': cybox_common.HashSegmentsType,
'Permissions': file_object.FilePermissionsType,
'Segment_Count': cybox_common.IntegerObjectPropertyType,
'Usage_Context_Assumption': cybox_common.StructuredTextType,
'Block_Hash': cybox_common.FuzzyHashBlockType,
'Dependency': cybox_common.DependencyType,
'Filename_Accessed_Time': cybox_common.DateTimeObjectPropertyType,
'Trigger_Point': cybox_common.HexBinaryObjectPropertyType,
'Environment_Variable': cybox_common.EnvironmentVariableType,
'Byte_Run': cybox_common.ByteRunType,
'Image_Offset': cybox_common.IntegerObjectPropertyType,
'Imports': cybox_common.ImportsType,
'Library': cybox_common.LibraryType,
'References': cybox_common.ToolReferencesType,
'Internal_Strings': cybox_common.InternalStringsType,
'Time': cybox_common.TimeType,
'EP_Jump_Codes': file_object.EPJumpCodeType,
'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType,
'File_Name': cybox_common.StringObjectPropertyType,
'Configuration_Setting': cybox_common.ConfigurationSettingType,
'Modified_Time': cybox_common.StringObjectPropertyType,
'Libraries': cybox_common.LibrariesType,
'Security_ID': cybox_common.StringObjectPropertyType,
'Function': cybox_common.StringObjectPropertyType,
'Description': cybox_common.StructuredTextType,
'User_Account_Info': cybox_common.ObjectPropertiesType,
'Build_Configuration': cybox_common.BuildConfigurationType,
'Extracted_Features': cybox_common.ExtractedFeaturesType,
'Magic_Number': cybox_common.HexBinaryObjectPropertyType,
'Address': cybox_common.HexBinaryObjectPropertyType,
'Search_Within': cybox_common.IntegerObjectPropertyType,
'Segment': cybox_common.HashSegmentType,
'Full_Path': cybox_common.StringObjectPropertyType,
'English_Translation': cybox_common.StringObjectPropertyType,
'Name': cybox_common.StringObjectPropertyType,
'Drive': cybox_common.StringObjectPropertyType,
'Depth': cybox_common.IntegerObjectPropertyType,
'Entry_Point': cybox_common.HexBinaryObjectPropertyType,
'Signature_Description': cybox_common.StringObjectPropertyType,
'Block_Size': cybox_common.IntegerObjectPropertyType,
'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType,
'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType,
'Data_Size': cybox_common.DataSizeType,
'Dependency_Description': cybox_common.StructuredTextType,
'File': file_object.FileObjectType,
'Contributor': cybox_common.ContributorType,
'Peak_Entropy': cybox_common.DoubleObjectPropertyType,
'Tools': cybox_common.ToolsInformationType,
'Tool': cybox_common.ToolInformationType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_File'
rootClass = WindowsFileObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_=rootTag,
# namespacedef_='',
# pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_File'
rootClass = WindowsFileObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
rootElement = rootObj.to_etree(None, name_=rootTag)
content = etree_.tostring(rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement
def parseString(inString):
from mixbox.vendor.six import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_File'
rootClass = WindowsFileObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_="Windows_File",
# namespacedef_='')
return rootObj
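# Illustrative usage of the helpers above (assuming `xml_text` holds a
# Windows_File XML document):
#   root_obj = parseString(xml_text)   # or parse('windows_file.xml')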
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
# Register abstract types
setattr(file_object, "WindowsFileAttributesType", WindowsFileAttributesType)
setattr(file_object, "WindowsFilePermissionsType", WindowsFilePermissionsType)
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"WindowsFileObjectType",
"StreamObjectType",
"StreamListType",
"WindowsFileAttributesType",
"WindowsFileAttributeType",
"WindowsFilePermissionsType"
]
| bsd-3-clause | 6,922,594,435,939,465,000 | 49.188957 | 652 | 0.661427 | false |
diamond-org/flask-diamond | flask_diamond/skels/app/+application.module+/migrations/versions/20f04b9598da_flask-diamond-020.py | 1 | 1165 | """update to flask-diamond 0.2.0
Revision ID: 20f04b9598da
Revises: cf0f5b45967
Create Date: 2015-02-07 22:54:24.608403
"""
# revision identifiers, used by Alembic.
revision = '20f04b9598da'
down_revision = 'cf0f5b45967'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('current_login_at', sa.DateTime(), nullable=True))
op.add_column('user', sa.Column('current_login_ip', sa.String(length=255), nullable=True))
op.add_column('user', sa.Column('last_login_at', sa.DateTime(), nullable=True))
op.add_column('user', sa.Column('last_login_ip', sa.String(length=255), nullable=True))
op.add_column('user', sa.Column('login_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'login_count')
op.drop_column('user', 'last_login_ip')
op.drop_column('user', 'last_login_at')
op.drop_column('user', 'current_login_ip')
op.drop_column('user', 'current_login_at')
### end Alembic commands ###
| mit | 2,992,216,872,752,428,500 | 33.264706 | 94 | 0.675536 | false |
dreibh/planetlab-lxc-plcapi | PLC/Accessor.py | 1 | 4296 | #
# Thierry Parmentelat - INRIA
#
#
# just a placeholder for storing accessor-related tag checkers
# this is filled by the accessors factory
#
# NOTE. If you ever come to manually delete a TagType that was created
# by the Factory, you need to restart your python instance / web server
# as the cached information then becomes wrong
from PLC.Logger import logger
from PLC.TagTypes import TagTypes, TagType
from PLC.Roles import Roles, Role
# implementation
class Accessor(object):
"""
This is a placeholder for storing accessor-related tag checkers.
Methods in this class are defined by the accessors factory
Class is implemented as a singleton, so we can cache results over time
"""
_instance = None
tag_locators = {}
def __init__(self, api):
self.api = api
# 'tagname'=>'tag_id'
self.cache = {}
self.hash_name_to_role = {role['name']: role for role in Roles(api)}
def has_cache(self, tagname):
return tagname in self.cache
def get_cache(self, tagname):
return self.cache[tagname]
def set_cache(self, tagname, tag_type):
self.cache[tagname] = tag_type
def locate_or_create_tag(self, tagname, category,
description, roles, enforce=False):
"search tag type from tagname & create if needed"
# cached ?
if self.has_cache(tagname):
return self.get_cache(tagname)
# search
tag_types = TagTypes(self.api, {'tagname': tagname})
if tag_types:
tag_type = tag_types[0]
# enforce should only be set by
# 'service plc start accessors' sequence
if enforce:
try:
tag_type.update({'category': category,
'description': description})
tag_type.sync()
roles_to_add = set(roles).difference(set(tag_type['roles']))
for rolename in roles_to_add:
tag_type.add_role(self.hash_name_to_role[rolename])
                    roles_to_delete = set(tag_type['roles']).difference(set(roles))
for rolename in roles_to_delete:
tag_type.remove_role(self.hash_name_to_role[rolename])
except:
logger.exception(
"WARNING, Could not enforce tag type, tagname={}\n"
.format(tagname))
else:
# not found: create it
tag_type_fields = {'tagname': tagname,
'category': category,
'description': description}
tag_type = TagType(self.api, tag_type_fields)
tag_type.sync()
for role in roles:
try:
role_obj = Roles(self.api, role)[0]
tag_type.add_role(role_obj)
except:
# xxx todo find a more appropriate way of notifying this
logger.exception("Accessor.locate_or_create_tag: "
"Could not add role {} to tag_type {}"
.format(role,tagname))
self.set_cache(tagname, tag_type)
return tag_type
# a locator is a function that retrieves - or creates - a tag_type instance
@staticmethod
def register_tag_locator(name, tag_locator):
Accessor.tag_locators[name] = tag_locator
@staticmethod
def retrieve_tag_locator(name):
return Accessor.tag_locators[name]
# this is designed to be part of the 'service plc start' sequence
# it ensures the creation of all the tagtypes defined
# in the various accessors, and enforces consistency to the DB
# it's not easy to have define_accessors do this because at
    # load-time we do not have an instance of API yet
def run_all_tag_locators(self):
for (name, tag_locator) in list(Accessor.tag_locators.items()):
tag_locator(self, enforce=True)
####################
# make it a singleton so we can cache stuff in there over time
def AccessorSingleton(api):
if not Accessor._instance:
Accessor._instance = Accessor(api)
return Accessor._instance
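# Illustrative usage (not part of the original module): once an API instance
# is available, AccessorSingleton(api).run_all_tag_locators() creates or
# updates every factory-defined tag type in the database.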
| bsd-3-clause | -1,325,702,218,767,049,200 | 35.717949 | 84 | 0.575652 | false |
Andr3iC/juriscraper | opinions/united_states/state/minn.py | 1 | 3672 | # Scraper for Minnesota Supreme Court
#CourtID: minn
#Court Short Name: MN
#Author: Andrei Chelaru
#Reviewer: mlr
#Date: 2014-07-03
from datetime import date
import time
from juriscraper.OpinionSite import OpinionSite
from juriscraper.lib.date_utils import quarter, is_first_month_in_quarter
from lxml import html
import re
from requests.exceptions import HTTPError
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
d = date.today()
self.url = 'http://mn.gov/lawlib/archive/sct{short_year}q{quarter}.html'.format(
short_year=d.strftime("%y"),
quarter=quarter(d.month)
)
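        # e.g. for 2014-07-03 this resolves to the sample URL commented out below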
# self.url = 'http://mn.gov/lawlib/archive/sct14q3.html'
def _download(self, request_dict={}):
"""Overrides the download function so that we can catch 404 errors
silently. This is necessary because these web pages simply do not exist
for several days at the beginning of each quarter.
"""
try:
return super(Site, self)._download()
except HTTPError, e:
is_first_days_of_the_quarter = (
date.today().day <= 15 and
is_first_month_in_quarter(date.today().month)
)
got_404 = (e.response.status_code == 404)
if got_404 and is_first_days_of_the_quarter:
# Do nothing; abort the crawler
self.status = 200
# We need the body tag here so that xpath works elsewhere.
html_tree = html.fromstring('<html><body></body></html>')
return html_tree
else:
raise e
def _get_case_names(self):
path = '''//ul//li[not(contains(text(), 'ORDER') or
contains(text(), 'NO OPINIONS'))]/text()'''
return list(self.html.xpath(path))
def _get_download_urls(self):
path = '''//ul//li[not(contains(text(), 'ORDER') or
contains(text(), 'NO OPINIONS'))]//@href'''
return list(self.html.xpath(path))
def _get_case_dates(self):
path = '''//ul//h4/text()'''
dates = self.html.xpath(path)
last_date_index = len(dates) - 1
case_dates = []
for index, date_element in enumerate(dates):
if index < last_date_index:
path_2 = ("//h4[{c}]/following-sibling::li/text()[count("
" .|//h4[{n}]/preceding-sibling::li/text())="
"count("
" //h4[{n}]/preceding-sibling::li/text()"
") and not("
" contains(., 'ORDER') or"
" contains(., 'NO OPINIONS')"
")]".format(c=index + 1,
n=index + 2))
else:
path_2 = ("//h4[{c}]/following-sibling::li/text()[not("
" contains(., 'ORDER') or"
" contains(., 'NO OPINIONS'))]").format(c=index + 1)
d = date.fromtimestamp(time.mktime(time.strptime(re.sub(' ', '', str(date_element)), '%B%d,%Y')))
case_dates.extend([d] * len(self.html.xpath(path_2)))
return case_dates
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_names)
def _get_docket_numbers(self):
path = '''//ul//li[not(contains(text(), 'ORDER') or
contains(text(), 'NO OPINIONS'))]/a/text()'''
return list(self.html.xpath(path))
| bsd-2-clause | -1,327,994,466,461,317,600 | 38.913043 | 109 | 0.517974 | false |
wadester/wh_test_py | paramiko_test1.py | 1 | 2480 | #!/usr/bin/env python
# Module: paramiko_test1.py
# Purpose: test paramiko for SSH connections w/ keyless SSH and exceptions
# Date: N/A
# Notes:
# 1) Paramiko home:
# http://www.paramiko.org/
# 2) Multiple SSH discussions about Python
# http://stackoverflow.com/questions/3586106/perform-commands-over-ssh-with-python
# 3) Made DSA keys on Ubuntu, copied pub key to C7 and installed
# in ~/.ssh/authorized_keys (not authorized_keys2).
#
import getopt,sys
import time as t
import paramiko
import socket
# command is uptime
CMD="uptime"
# get the account/host info and password
ACCT_HOST="root@localhost"
PW="password"
verbose=0
def PrintHelp():
print "paramiko_test1.py: test Paramiko SSH Python functions"
print "Options: -h -- help, -v -- enable verbose"
print " -a user@host"
optlist, args = getopt.getopt(sys.argv[1:], 'a:vh')
for o,a in optlist:
if o == '-a':
ACCT_HOST=a
if o == '-v':
verbose=1
if o == '-h':
PrintHelp()
exit(1)
# at this point, we have all info we need
acct, host = ACCT_HOST.split('@', 2)
if (verbose == 1):
print "Paramiko test: verbose enabled"
print "SSH to ", host, "acct=", acct
print "Command=", CMD
try:
ssh=paramiko.SSHClient()
ssh.load_system_host_keys()
# use the following missing_host_key for Ubuntu as Ubuntu uses
# the newer ecdsa keys. Note they can be disabled system-wide:
# /etc/ssh/sshd_config:
# # HostKey /etc/ssh/ssh_host_ecdsa_key
#
# see: http://stackoverflow.com/questions/10670217/paramiko-unknown-server
#
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=acct, timeout=5.0)
for x in range(0, 1000):
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(CMD)
print "Res: ", ssh_stdout.read()
print "Err: ", ssh_stderr.read()
t.sleep(1)
# trap IO error, e.g., errors in many places inside the
# sftp object and other objects.
except IOError as err:
print "IOError", str(err)
sys.exit(2)
# trap SSH errors including closed channel
except paramiko.SSHException as err:
print "SSHException: ", str(err)
sys.exit(2)
# trap socket errors, for example on timeout or closure
except socket.error as err:
print "socket.error", str(err)
sys.exit(2)
except socket.timeout as err:
print "socket.timeout", str(err)
sys.exit(2)
ssh.close()
print "Done!"
| gpl-2.0 | -9,143,382,518,831,262,000 | 26.555556 | 87 | 0.650806 | false |
elishowk/easiparse | easiparse.py | 1 | 2367 | # -*- coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from easiparse import importer
import yaml
from glob import glob
import pymongo
import codecs
import threading
from os.path import join, split
from twisted.internet import reactor
reactor.suggestThreadPoolSize(30)
import logging
logging.basicConfig(level=logging.DEBUG, format="%(levelname)-8s %(message)s")
#class AsyncParse(threading.Thread):
# def __init__(self, config, input_path, mongodb, limit=None):
# threading.Thread.__init__(self)
# self.config = config
# self.input_path = input_path
# self.mongodb = mongodb
# self.limit = limit
def worker(config, input_path, mongodb, limit=None):
try:
isi_file = codecs.open(input_path, "rU", encoding="ascii",\
errors="replace")
except Exception, exc:
logging.error("Error reading file %s"%input_path)
return
output_file = codecs.open( join(config['output_path'], split(input_path)[1]),\
"w+", encoding="ascii", errors="replace")
subtotal = importer.main(
isi_file,
config,
output_file,
mongodb,
limit=limit
)
logging.debug("extracted %d matching notices in %s"%(subtotal, isi_file))
if __name__ == "__main__":
config = yaml.load( open( "config.yaml", 'rU' ) )
glob_list = glob(config['input_path'])
mongodb = pymongo.Connection(config['mongo_host'],\
config['mongo_port'])[config['mongo_db_name']]
for input_path in glob_list:
reactor.callInThread(worker, config, input_path, mongodb, limit=None)
#asyncparser = AsyncParse(config, input_path, mongodb, None)
#asyncparser.start()
reactor.run()
#[parser.join() for parser in thread_list]
| gpl-3.0 | 1,788,542,903,113,248,000 | 33.304348 | 82 | 0.672581 | false |
WeblateOrg/weblate | weblate/checks/management/commands/list_checks.py | 1 | 2258 | #
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from textwrap import wrap
from weblate.checks.format import BaseFormatCheck
from weblate.checks.models import CHECKS
from weblate.utils.management.base import BaseCommand
def sorter(check):
if isinstance(check, BaseFormatCheck):
pos = 1
elif check.name < "Formatted strings":
pos = 0
else:
pos = 2
return (check.source, pos, check.name.lower())
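# Illustrative ordering produced by sorter(): within a given check "source",
# non-format checks whose names sort before "Formatted strings" come first
# (pos 0), format checks come next (pos 1), and the remaining checks last (pos 2).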
class Command(BaseCommand):
help = "List installed checks"
def flush_lines(self, lines):
self.stdout.writelines(lines)
lines.clear()
def handle(self, *args, **options):
"""List installed checks."""
ignores = []
lines = []
for check in sorted(CHECKS.values(), key=sorter):
is_format = isinstance(check, BaseFormatCheck)
# Output immediately
self.stdout.write(f".. _{check.doc_id}:\n")
if not lines:
lines.append("\n")
lines.append(str(check.name))
if is_format:
lines.append("*" * len(check.name))
else:
lines.append("~" * len(check.name))
lines.append("\n")
lines.append("\n".join(wrap(f"*{check.description}*", 79)))
lines.append("\n")
self.flush_lines(lines)
ignores.append(f"``{check.ignore_string}``")
ignores.append(f' Skip the "{check.name}" quality check.')
self.stdout.write("\n")
self.stdout.writelines(ignores)
| gpl-3.0 | -7,192,929,059,752,548,000 | 31.681159 | 73 | 0.629712 | false |
reliableJARED/WebGL | static/ammo.js/make.py | 1 | 5301 | #!/usr/bin/python
import os, sys, re, json, shutil, multiprocessing
from subprocess import Popen, PIPE, STDOUT
# Definitions
INCLUDES = ['btBulletDynamicsCommon.h', os.path.join('BulletCollision', 'CollisionShapes', 'btHeightfieldTerrainShape.h'), os.path.join('BulletCollision', 'CollisionDispatch', 'btGhostObject.h'), os.path.join('BulletDynamics', 'Character', 'btKinematicCharacterController.h'), os.path.join('BulletSoftBody', 'btSoftBody.h'), os.path.join('BulletSoftBody', 'btSoftRigidDynamicsWorld.h'), os.path.join('BulletSoftBody', 'btDefaultSoftBodySolver.h'), os.path.join('BulletSoftBody', 'btSoftBodyRigidBodyCollisionConfiguration.h'), os.path.join('BulletSoftBody', 'btSoftBodyHelpers.h')]
# Startup
exec(open(os.path.expanduser('~/.emscripten'), 'r').read())
try:
EMSCRIPTEN_ROOT
except:
print "ERROR: Missing EMSCRIPTEN_ROOT (which should be equal to emscripten's root dir) in ~/.emscripten"
sys.exit(1)
sys.path.append(EMSCRIPTEN_ROOT)
import tools.shared as emscripten
# Settings
'''
Settings.INLINING_LIMIT = 0
Settings.DOUBLE_MODE = 0
Settings.PRECISE_I64_MATH = 0
Settings.CORRECT_SIGNS = 0
Settings.CORRECT_OVERFLOWS = 0
Settings.CORRECT_ROUNDINGS = 0
'''
emcc_args = sys.argv[1:] or '-O3 --llvm-lto 1 -s NO_EXIT_RUNTIME=1 -s AGGRESSIVE_VARIABLE_ELIMINATION=1 -s NO_DYNAMIC_EXECUTION=1 --memory-init-file 0 -s NO_FILESYSTEM=1 -s EXPORTED_RUNTIME_METHODS=[] -s ELIMINATE_DUPLICATE_FUNCTIONS=1'.split(' ')
emcc_args += ['-s', 'TOTAL_MEMORY=%d' % (64*1024*1024)] # default 64MB. Compile with ALLOW_MEMORY_GROWTH if you want a growable heap (slower though).
#emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=1'] # resizable heap, with some amount of slowness
emcc_args += '-s EXPORT_NAME="AmmoLib" -s MODULARIZE=1'.split(' ')
print
print '--------------------------------------------------'
print 'Building ammo.js, build type:', emcc_args
print '--------------------------------------------------'
print
'''
import os, sys, re
infile = open(sys.argv[1], 'r').read()
outfile = open(sys.argv[2], 'w')
t1 = infile
while True:
t2 = re.sub(r'\(\n?!\n?1\n?\+\n?\(\n?!\n?1\n?\+\n?(\w)\n?\)\n?\)', lambda m: '(!1+' + m.group(1) + ')', t1)
print len(infile), len(t2)
if t1 == t2: break
t1 = t2
outfile.write(t2)
'''
# Utilities
stage_counter = 0
def stage(text):
global stage_counter
stage_counter += 1
text = 'Stage %d: %s' % (stage_counter, text)
print
print '=' * len(text)
print text
print '=' * len(text)
print
# Main
try:
this_dir = os.getcwd()
os.chdir('bullet')
if not os.path.exists('build'):
os.makedirs('build')
os.chdir('build')
stage('Generate bindings')
Popen([emscripten.PYTHON, os.path.join(EMSCRIPTEN_ROOT, 'tools', 'webidl_binder.py'), os.path.join(this_dir, 'ammo.idl'), 'glue']).communicate()
assert os.path.exists('glue.js')
assert os.path.exists('glue.cpp')
stage('Build bindings')
args = ['-I../src', '-c']
for include in INCLUDES:
args += ['-include', include]
emscripten.Building.emcc('glue.cpp', args, 'glue.bc')
assert(os.path.exists('glue.bc'))
# Configure with CMake on Windows, and with configure on Unix.
cmake_build = emscripten.WINDOWS
if cmake_build:
if not os.path.exists('CMakeCache.txt'):
stage('Configure via CMake')
emscripten.Building.configure([emscripten.PYTHON, os.path.join(EMSCRIPTEN_ROOT, 'emcmake'), 'cmake', '..', '-DBUILD_DEMOS=OFF', '-DBUILD_EXTRAS=OFF', '-DBUILD_CPU_DEMOS=OFF', '-DUSE_GLUT=OFF', '-DCMAKE_BUILD_TYPE=Release'])
else:
if not os.path.exists('config.h'):
stage('Configure (if this fails, run autogen.sh in bullet/ first)')
emscripten.Building.configure(['../configure', '--disable-demos','--disable-dependency-tracking'])
stage('Make')
CORES = multiprocessing.cpu_count()
if emscripten.WINDOWS:
emscripten.Building.make(['mingw32-make', '-j', str(CORES)])
else:
emscripten.Building.make(['make', '-j', str(CORES)])
stage('Link')
if cmake_build:
bullet_libs = [os.path.join('src', 'BulletSoftBody', 'libBulletSoftBody.a'),
os.path.join('src', 'BulletDynamics', 'libBulletDynamics.a'),
os.path.join('src', 'BulletCollision', 'libBulletCollision.a'),
os.path.join('src', 'LinearMath', 'libLinearMath.a')]
else:
bullet_libs = [os.path.join('src', '.libs', 'libBulletSoftBody.a'),
os.path.join('src', '.libs', 'libBulletDynamics.a'),
os.path.join('src', '.libs', 'libBulletCollision.a'),
os.path.join('src', '.libs', 'libLinearMath.a')]
emscripten.Building.link(['glue.bc'] + bullet_libs, 'libbullet.bc')
assert os.path.exists('libbullet.bc')
stage('emcc: ' + ' '.join(emcc_args))
temp = os.path.join('..', '..', 'builds', 'temp.js')
emscripten.Building.emcc('libbullet.bc', emcc_args + ['--js-transform', 'python %s' % os.path.join('..', '..', 'bundle.py')],
temp)
assert os.path.exists(temp), 'Failed to create script code'
stage('wrap')
wrapped = '''
// This is ammo.js, a port of Bullet Physics to JavaScript. zlib licensed.
''' + open(temp).read() + '''
Ammo = AmmoLib();
'''
open(temp, 'w').write(wrapped)
finally:
os.chdir(this_dir);
| gpl-3.0 | 756,807,422,654,941,000 | 33.2 | 581 | 0.635163 | false |
runt18/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/text.py | 1 | 55450 | """
Classes for including text in a figure.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import artist
from artist import Artist
from cbook import is_string_like, maxdict
from font_manager import FontProperties
from patches import bbox_artist, YAArrow, FancyBboxPatch, \
FancyArrowPatch, Rectangle
import transforms as mtransforms
from transforms import Affine2D, Bbox
from lines import Line2D
import matplotlib.nxutils as nxutils
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
"""
Return the text angle as float.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
if rotation in ('horizontal', None):
angle = 0.
elif rotation == 'vertical':
angle = 90.
else:
angle = float(rotation)
return angle%360
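# Illustrative behaviour of get_rotation (added for clarity):
#   get_rotation('horizontal') -> 0.0, get_rotation('vertical') -> 90.0,
#   get_rotation(450) -> 90.0, since the angle is reduced modulo 360.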
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Text'] = """
========================== =========================================================================
Property Value
========================== =========================================================================
alpha float
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a pad in points
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family [ 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties instance
horizontalalignment or ha [ 'center' | 'right' | 'left' ]
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string eg, ['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
    rotation                   [ angle in degrees | 'vertical' | 'horizontal' ]
size or fontsize [ size in points | relative size eg 'smaller', 'x-large' ]
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
variant [ 'normal' | 'small-caps' ]
verticalalignment or va [ 'center' | 'top' | 'bottom' | 'baseline' ]
visible [True | False]
weight or fontweight [ 'normal' | 'bold' | 'heavy' | 'light' | 'ultrabold' | 'ultralight']
x float
y float
zorder any number
========================== =========================================================================
"""
# TODO : This function may move into the Text class as a method. As a
# matter of fact, The information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
"""
    Calculate the bounding box of the text. Unlike the
    :meth:`matplotlib.text.Text.get_extents` method, the bbox size of
    the text before rotation is calculated.
"""
projected_xs = []
projected_ys = []
theta = text.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(-theta)
for t, wh, x, y in text._get_layout(renderer)[1]:
w, h = wh
xt1, yt1 = tr.transform_point((x, y))
xt2, yt2 = xt1+w, yt1+h
projected_xs.extend([xt1, xt2])
projected_ys.extend([yt1, yt2])
xt_box, yt_box = min(projected_xs), min(projected_ys)
w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box
tr = mtransforms.Affine2D().rotate(theta)
x_box, y_box = tr.transform_point((xt_box, yt_box))
return x_box, y_box, w_box, h_box
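# Note (added for clarity): _get_textbox returns (x_box, y_box, w_box, h_box)
# in display units for the unrotated text box; Text.update_bbox_position_size
# and Text._draw_bbox use it to size and place the FancyBboxPatch.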
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
def __str__(self):
return "Text({0:g},{1:g},{2!s})".format(self._y, self._y, repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='bottom',
horizontalalignment='left',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
**kwargs
):
"""
Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
with string *text*.
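        A minimal illustrative example (any of the kwargs listed below
        work the same way):
        Text(0.5, 0.5, 'hello world', color='red', rotation=45.)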
Valid kwargs are
%(Text)s
"""
Artist.__init__(self)
self.cached = maxdict(5)
self._x, self._y = x, y
if color is None: color = rcParams['text.color']
if fontproperties is None: fontproperties=FontProperties()
elif is_string_like(fontproperties): fontproperties=FontProperties(fontproperties)
self.set_text(text)
self.set_color(color)
self._verticalalignment = verticalalignment
self._horizontalalignment = horizontalalignment
self._multialignment = multialignment
self._rotation = rotation
self._fontproperties = fontproperties
self._bbox = None
self._bbox_patch = None # a FancyBboxPatch instance
self._renderer = None
if linespacing is None:
linespacing = 1.2 # Maybe use rcParam later.
self._linespacing = linespacing
self.update(kwargs)
#self.set_bbox(dict(pad=0))
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible() or self._renderer is None:
return False,{}
l,b,w,h = self.get_window_extent().bounds
r = l+w
t = b+h
xyverts = (l,b), (l, t), (r, t), (r, b)
x, y = mouseevent.x, mouseevent.y
inside = nxutils.pnpoly(x, y, xyverts)
return inside,{}
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_position()
return self.get_transform().transform_point((x,y))
def _get_multialignment(self):
if self._multialignment is not None: return self._multialignment
else: return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
def _get_layout(self, renderer):
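        # Lay the text out line by line and return (bbox, [(line, (w, h), x, y), ...]):
        # the overall bounding box plus each line's size and offset from the text
        # position. Results are cached, keyed on get_prop_tup().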
key = self.get_prop_tup()
if key in self.cached: return self.cached[key]
horizLayout = []
thisx, thisy = 0.0, 0.0
xmin, ymin = 0.0, 0.0
width, height = 0.0, 0.0
lines = self._text.split('\n')
whs = np.zeros((len(lines), 2))
horizLayout = np.zeros((len(lines), 4))
# Find full vertical extent of font,
# including ascenders and descenders:
tmp, heightt, bl = renderer.get_text_width_height_descent(
'lp', self._fontproperties, ismath=False)
offsety = heightt * self._linespacing
baseline = None
for i, line in enumerate(lines):
clean_line, ismath = self.is_math_text(line)
w, h, d = renderer.get_text_width_height_descent(
clean_line, self._fontproperties, ismath=ismath)
if baseline is None:
baseline = h - d
whs[i] = w, h
horizLayout[i] = thisx, thisy, w, h
thisy -= offsety
width = max(width, w)
ymin = horizLayout[-1][1]
ymax = horizLayout[0][1] + horizLayout[0][3]
height = ymax-ymin
xmax = xmin + width
# get the rotation matrix
M = Affine2D().rotate_deg(self.get_rotation())
offsetLayout = np.zeros((len(lines), 2))
offsetLayout[:] = horizLayout[:, 0:2]
# now offset the individual text lines within the box
        if len(lines)>1: # do the multiline alignment
malign = self._get_multialignment()
if malign == 'center':
offsetLayout[:, 0] += width/2.0 - horizLayout[:, 2] / 2.0
elif malign == 'right':
offsetLayout[:, 0] += width - horizLayout[:, 2]
# the corners of the unrotated bounding box
cornersHoriz = np.array(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
np.float_)
# now rotate the bbox
cornersRotated = M.transform(cornersHoriz)
txs = cornersRotated[:, 0]
tys = cornersRotated[:, 1]
# compute the bounds of the rotated box
xmin, xmax = txs.min(), txs.max()
ymin, ymax = tys.min(), tys.max()
width = xmax - xmin
height = ymax - ymin
        # Now move the box to the target position, offsetting the display bbox by alignment
halign = self._horizontalalignment
valign = self._verticalalignment
# compute the text location in display coords and the offsets
# necessary to align the bbox with that location
if halign=='center': offsetx = (xmin + width/2.0)
elif halign=='right': offsetx = (xmin + width)
else: offsetx = xmin
if valign=='center': offsety = (ymin + height/2.0)
elif valign=='top': offsety = (ymin + height)
elif valign=='baseline': offsety = (ymin + height) - baseline
else: offsety = ymin
xmin -= offsetx
ymin -= offsety
bbox = Bbox.from_bounds(xmin, ymin, width, height)
# now rotate the positions around the first x,y position
xys = M.transform(offsetLayout)
xys -= (offsetx, offsety)
xs, ys = xys[:, 0], xys[:, 1]
ret = bbox, zip(lines, whs, xs, ys)
self.cached[key] = ret
return ret
def set_bbox(self, rectprops):
"""
Draw a bounding box around self. rectprops are any settable
properties for a rectangle, eg facecolor='red', alpha=0.5.
t.set_bbox(dict(facecolor='red', alpha=0.5))
        If rectprops has a "boxstyle" key, a FancyBboxPatch
        is initialized with rectprops and will be drawn. The mutation
        scale of the FancyBboxPatch is set to the fontsize.
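        An illustrative FancyBboxPatch example ('round' is one of the
        available boxstyle names):
        t.set_bbox(dict(boxstyle='round', facecolor='wheat', alpha=0.5))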
ACCEPTS: rectangle prop dict
"""
# The self._bbox_patch object is created only if rectprops has
# boxstyle key. Otherwise, self._bbox will be set to the
# rectprops and the bbox will be drawn using bbox_artist
# function. This is to keep the backward compatibility.
if rectprops is not None and "boxstyle" in rectprops:
props = rectprops.copy()
boxstyle = props.pop("boxstyle")
bbox_transmuter = props.pop("bbox_transmuter", None)
self._bbox_patch = FancyBboxPatch((0., 0.),
1., 1.,
boxstyle=boxstyle,
bbox_transmuter=bbox_transmuter,
transform=mtransforms.IdentityTransform(),
**props)
self._bbox = None
else:
self._bbox_patch = None
self._bbox = rectprops
def get_bbox_patch(self):
"""
        Return the bbox Patch object. Returns None if the
        FancyBboxPatch is not made.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
        should be used when the position and size of the bbox need to
        be updated before actually drawing the bbox.
"""
# For arrow_patch, use textbox as patchA by default.
if not isinstance(self.arrow_patch, FancyArrowPatch):
return
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
#self._bbox_patch.draw(renderer)
else:
props = self._bbox
if props is None: props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = self.get_window_extent(renderer)
l,b,w,h = bbox.bounds
l-=pad/2.
b-=pad/2.
w+=pad
h+=pad
r = Rectangle(xy=(l,b),
width=w,
height=h,
)
r.set_transform(mtransforms.IdentityTransform())
r.set_clip_on( False )
r.update(props)
self.arrow_patch.set_patchA(r)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBoxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def draw(self, renderer):
"""
Draws the :class:`Text` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible(): return
if self._text=='': return
bbox, info = self._get_layout(renderer)
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
canvasw, canvash = renderer.get_canvas_width_height()
# draw the FancyBboxPatch
if self._bbox_patch:
self._draw_bbox(renderer, posx, posy)
gc = renderer.new_gc()
gc.set_foreground(self._color)
gc.set_alpha(self._alpha)
gc.set_url(self._url)
if self.get_clip_on():
gc.set_clip_rectangle(self.clipbox)
if self._bbox:
bbox_artist(self, renderer, self._bbox)
angle = self.get_rotation()
if rcParams['text.usetex']:
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_tex(gc, x, y, clean_line,
self._fontproperties, angle)
return
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_text(gc, x, y, clean_line,
self._fontproperties, angle,
ismath=ismath)
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
return self.get_fontproperties
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
x, y = self.get_position()
return (x, y, self._text, self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties), self._rotation,
self.figure.dpi, id(self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
'alias for :meth:`getverticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
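        Illustrative usage (assumes an Agg-style canvas that provides
        ``get_renderer``):
        bbox = t.get_window_extent(renderer=fig.canvas.get_renderer())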
'''
#return _unit_box
if not self.get_visible(): return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self._text == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx,ty,0,0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info = self._get_layout(self._renderer)
x, y = self.get_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
ACCEPTS: any matplotlib color
"""
if self._bbox is None:
self._bbox = dict(facecolor=color, edgecolor=color)
else:
self._bbox.update(dict(facecolor=color))
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of {0!s}'.format(str(legal)))
self._horizontalalignment = align
def set_ma(self, align):
'alias for set_verticalalignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
        Set the alignment for multiple-line layout. The layout of the
        bounding box of all the lines is determined by the horizontalalignment
        and verticalalignment properties, but the multiline text within that
        box can be aligned to the left, right, or center of the box.
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
            raise ValueError('Multialignment must be one of {0!s}'.format(str(legal)))
self._multialignment = align
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
"""
self._fontproperties.set_family(fontname)
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of {0!s}'.format(str(legal)))
self._verticalalignment = align
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '{0!s}'.format(s)
def is_math_text(self, s):
"""
Returns True if the given string *s* contains any mathtext.
"""
# Did we find an even number of non-escaped dollar signs?
# If so, treat is as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
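    # Illustrative behaviour of is_math_text (assuming text.usetex is off):
    #   an even number of unescaped dollar signs, e.g. r'$\alpha$', yields
    #   (r'$\alpha$', True); escaped dollars only, e.g. r'cost \$5', yields
    #   ('cost $5', False).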
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
artist.kwdocd['Text'] = artist.kwdoc(Text)
Text.__init__.im_func.__doc__ = cbook.dedent(Text.__init__.__doc__) % artist.kwdocd
class TextWithDash(Text):
"""
This is basically a :class:`~matplotlib.text.Text` with a dash
(drawn with a :class:`~matplotlib.lines.Line2D`) before/after
it. It is intended to be a drop-in replacement for
:class:`~matplotlib.text.Text`, and should behave identically to
it when *dashlength* = 0.0.
The dash always comes between the point specified by
:meth:`~matplotlib.text.Text.set_position` and the text. When a
dash exists, the text alignment arguments (*horizontalalignment*,
*verticalalignment*) are ignored.
*dashlength* is the length of the dash in canvas units.
(default = 0.0).
*dashdirection* is one of 0 or 1, where 0 draws the dash after the
text and 1 before. (default = 0).
*dashrotation* specifies the rotation of the dash, and should
generally stay *None*. In this case
:meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
:meth:`~matplotlib.text.Text.get_rotation`. (I.e., the dash takes
its rotation from the text's rotation). Because the text center is
projected onto the dash, major deviations in the rotation cause
what may be considered visually unappealing results.
(default = *None*)
*dashpad* is a padding length to add (or subtract) space
between the text and the dash, in canvas units.
(default = 3)
*dashpush* "pushes" the dash and text away from the point
specified by :meth:`~matplotlib.text.Text.set_position` by the
amount in canvas units. (default = 0)
.. note::
The alignment of the two objects is based on the bounding box
of the :class:`~matplotlib.text.Text`, as obtained by
:meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
turn, appears to depend on the font metrics as given by the
rendering backend. Hence the quality of the "centering" of the
label text with respect to the dash varies depending on the
backend used.
.. note::
I'm not sure that I got the
:meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
or whether that's sufficient for providing the object bounding
box.
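    A minimal illustrative example (the instance still has to be added to
    an axes or figure to be drawn):
    t = TextWithDash(0.5, 0.5, 'label', dashlength=20.0, dashpad=5)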
"""
__name__ = 'textwithdash'
def __str__(self):
return "TextWithDash({0:g},{1:g},{2!s})".format(self._x, self._y, repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='center',
horizontalalignment='center',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
dashlength=0.0,
dashdirection=0,
dashrotation=None,
dashpad=3,
dashpush=0,
):
Text.__init__(self, x=x, y=y, text=text, color=color,
verticalalignment=verticalalignment,
horizontalalignment=horizontalalignment,
multialignment=multialignment,
fontproperties=fontproperties,
rotation=rotation,
linespacing=linespacing)
# The position (x,y) values for text and dashline
# are bogus as given in the instantiation; they will
# be set correctly by update_coords() in draw()
self.dashline = Line2D(xdata=(x, x),
ydata=(y, y),
color='k',
linestyle='-')
self._dashx = float(x)
self._dashy = float(y)
self._dashlength = dashlength
self._dashdirection = dashdirection
self._dashrotation = dashrotation
self._dashpad = dashpad
self._dashpush = dashpush
#self.set_bbox(dict(pad=0))
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._dashx))
y = float(self.convert_yunits(self._dashy))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
props = [p for p in Text.get_prop_tup(self)]
props.extend([self._x, self._y, self._dashlength, self._dashdirection, self._dashrotation, self._dashpad, self._dashpush])
return tuple(props)
def draw(self, renderer):
"""
Draw the :class:`TextWithDash` object to the given *renderer*.
"""
self.update_coords(renderer)
Text.draw(self, renderer)
if self.get_dashlength() > 0.0:
self.dashline.draw(renderer)
def update_coords(self, renderer):
"""
Computes the actual *x*, *y* coordinates for text based on the
input *x*, *y* and the *dashlength*. Since the rotation is
with respect to the actual canvas's coordinates we need to map
back and forth.
"""
dashx, dashy = self.get_position()
dashlength = self.get_dashlength()
# Shortcircuit this process if we don't have a dash
if dashlength == 0.0:
self._x, self._y = dashx, dashy
return
dashrotation = self.get_dashrotation()
dashdirection = self.get_dashdirection()
dashpad = self.get_dashpad()
dashpush = self.get_dashpush()
angle = get_rotation(dashrotation)
theta = np.pi*(angle/180.0+dashdirection-1)
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
transform = self.get_transform()
# Compute the dash end points
# The 'c' prefix is for canvas coordinates
cxy = transform.transform_point((dashx, dashy))
cd = np.array([cos_theta, sin_theta])
c1 = cxy+dashpush*cd
c2 = cxy+(dashpush+dashlength)*cd
inverse = transform.inverted()
(x1, y1) = inverse.transform_point(tuple(c1))
(x2, y2) = inverse.transform_point(tuple(c2))
self.dashline.set_data((x1, x2), (y1, y2))
# We now need to extend this vector out to
# the center of the text area.
# The basic problem here is that we're "rotating"
# two separate objects but want it to appear as
# if they're rotated together.
# This is made non-trivial because of the
# interaction between text rotation and alignment -
# text alignment is based on the bbox after rotation.
# We reset/force both alignments to 'center'
# so we can do something relatively reasonable.
# There's probably a better way to do this by
# embedding all this in the object's transformations,
# but I don't grok the transformation stuff
# well enough yet.
we = Text.get_window_extent(self, renderer=renderer)
w, h = we.width, we.height
# Watch for zeros
if sin_theta == 0.0:
dx = w
dy = 0.0
elif cos_theta == 0.0:
dx = 0.0
dy = h
else:
tan_theta = sin_theta/cos_theta
dx = w
dy = w*tan_theta
if dy > h or dy < -h:
dy = h
dx = h/tan_theta
cwd = np.array([dx, dy])/2
cwd *= 1+dashpad/np.sqrt(np.dot(cwd,cwd))
cw = c2+(dashdirection*2-1)*cwd
newx, newy = inverse.transform_point(tuple(cw))
self._x, self._y = newx, newy
# Now set the window extent
# I'm not at all sure this is the right way to do this.
we = Text.get_window_extent(self, renderer=renderer)
self._twd_window_extent = we.frozen()
self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
# Finally, make text align center
Text.set_horizontalalignment(self, 'center')
Text.set_verticalalignment(self, 'center')
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
'''
self.update_coords(renderer)
if self.get_dashlength() == 0.0:
return Text.get_window_extent(self, renderer=renderer)
else:
return self._twd_window_extent
def get_dashlength(self):
"""
Get the length of the dash.
"""
return self._dashlength
def set_dashlength(self, dl):
"""
Set the length of the dash.
ACCEPTS: float (canvas units)
"""
self._dashlength = dl
def get_dashdirection(self):
"""
        Get the direction of the dash. 1 is before the text and 0 is after.
"""
return self._dashdirection
def set_dashdirection(self, dd):
"""
Set the direction of the dash following the text.
1 is before the text and 0 is after. The default
is 0, which is what you'd want for the typical
case of ticks below and on the left of the figure.
ACCEPTS: int (1 is before, 0 is after)
"""
self._dashdirection = dd
def get_dashrotation(self):
"""
Get the rotation of the dash in degrees.
"""
if self._dashrotation is None:
return self.get_rotation()
else:
return self._dashrotation
def set_dashrotation(self, dr):
"""
Set the rotation of the dash, in degrees
ACCEPTS: float (degrees)
"""
self._dashrotation = dr
def get_dashpad(self):
"""
Get the extra spacing between the dash and the text, in canvas units.
"""
return self._dashpad
def set_dashpad(self, dp):
"""
Set the "pad" of the TextWithDash, which is the extra spacing
between the dash and the text, in canvas units.
ACCEPTS: float (canvas units)
"""
self._dashpad = dp
def get_dashpush(self):
"""
Get the extra spacing between the dash and the specified text
position, in canvas units.
"""
return self._dashpush
def set_dashpush(self, dp):
"""
Set the "push" of the TextWithDash, which
is the extra spacing between the beginning
of the dash and the specified position.
ACCEPTS: float (canvas units)
"""
self._dashpush = dp
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the :class:`TextWithDash`.
ACCEPTS: (x, y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashx = float(x)
def set_y(self, y):
"""
Set the *y* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashy = float(y)
def set_transform(self, t):
"""
Set the :class:`matplotlib.transforms.Transform` instance used
by this artist.
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Text.set_transform(self, t)
self.dashline.set_transform(t)
def get_figure(self):
'return the figure instance the artist belongs to'
return self.figure
def set_figure(self, fig):
"""
        Set the figure instance the artist belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
Text.set_figure(self, fig)
self.dashline.set_figure(fig)
artist.kwdocd['TextWithDash'] = artist.kwdoc(TextWithDash)
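# A minimal illustrative sketch of the dash parameters documented in the getters and
# setters above.  This is a hypothetical helper, not part of the original module or of
# the matplotlib API; it assumes *ax* is an existing matplotlib Axes supplied by the
# caller.
def _text_with_dash_sketch(ax):
    twd = TextWithDash(x=0.5, y=0.5, text='tick label',
                       dashlength=25.0,     # length of the dash, in canvas units
                       dashdirection=1,     # 1 = dash drawn before the text, 0 = after
                       dashrotation=45.0,   # dash angle, in degrees
                       dashpad=5)           # extra gap between dash and text, canvas units
    twd.set_transform(ax.transAxes)         # place (0.5, 0.5) in axes-fraction coords
    ax.add_artist(twd)
    return twd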
class Annotation(Text):
"""
A :class:`~matplotlib.text.Text` class to make annotating things
in the figure, such as :class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes`,
:class:`~matplotlib.patches.Rectangle`, etc., easier.
"""
def __str__(self):
return "Annotation({0:g},{1:g},{2!s})".format(self.xy[0], self.xy[1], repr(self._text))
def __init__(self, s, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
**kwargs):
"""
Annotate the *x*, *y* point *xy* with text *s* at *x*, *y*
location *xytext*. (If *xytext* = *None*, defaults to *xy*,
and if *textcoords* = *None*, defaults to *xycoords*).
*arrowprops*, if not *None*, is a dictionary of line properties
(see :class:`matplotlib.lines.Line2D`) for the arrow that connects
annotation to the point.
If the dictionary has a key *arrowstyle*, a FancyArrowPatch
instance is created with the given dictionary and is
        drawn. Otherwise, a YAArrow patch instance is created and
        drawn. Valid keys for YAArrow are
========= =============================================================
Key Description
========= =============================================================
width the width of the arrow in points
frac the fraction of the arrow length occupied by the head
headwidth the width of the base of the arrow head in points
shrink oftentimes it is convenient to have the arrowtip
and base a bit away from the text and point being
annotated. If *d* is the distance between the text and
annotated point, shrink will shorten the arrow so the tip
                    and base are shrink percent of the distance *d* away from the
                    endpoints.  I.e., ``shrink=0.05`` is 5%%
        ?           any key for :class:`matplotlib.patches.Polygon`
========= =============================================================
Valid keys for FancyArrowPatch are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*xycoords* and *textcoords* are strings that indicate the
coordinates of *xy* and *xytext*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
        'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
        'axes fraction'   0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
If a 'points' or 'pixels' option is specified, values will be
added to the bottom-left and if negative, values will be
subtracted from the top-right. Eg::
# 10 points to the right of the left border of the axes and
# 5 points below the top border
xy=(10,-5), xycoords='axes points'
Additional kwargs are Text properties:
%(Text)s
"""
if xytext is None:
xytext = xy
if textcoords is None:
textcoords = xycoords
# we'll draw ourself after the artist we annotate by default
x,y = self.xytext = xytext
Text.__init__(self, x, y, s, **kwargs)
self.xy = xy
self.xycoords = xycoords
self.textcoords = textcoords
self.arrowprops = arrowprops
self.arrow = None
        if arrowprops and "arrowstyle" in arrowprops:
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1,1),
**arrowprops)
else:
self.arrow_patch = None
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def contains(self,event):
t,tinfo = Text.contains(self,event)
if self.arrow is not None:
a,ainfo=self.arrow.contains(event)
t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t,tinfo
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def _get_xy(self, x, y, s):
if s=='data':
trans = self.axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s=='offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi/72.
y *= dpi/72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s=='polar':
theta, r = x, y
x = r*np.cos(theta)
y = r*np.sin(theta)
trans = self.axes.transData
return trans.transform_point((x,y))
elif s=='figure points':
#points from the lower left corner of the figure
dpi = self.figure.dpi
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
x *= dpi/72.
y *= dpi/72.
if x<0:
x = r + x
if y<0:
y = t + y
return x,y
elif s=='figure pixels':
#pixels from the lower left corner of the figure
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
if y<0:
y = t + y
return x, y
elif s=='figure fraction':
#(0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x,y))
elif s=='axes points':
#points from the lower left corner of the axes
dpi = self.figure.dpi
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x*dpi/72.
else:
x = l + x*dpi/72.
if y<0:
y = t + y*dpi/72.
else:
y = b + y*dpi/72.
return x, y
elif s=='axes pixels':
#pixels from the lower left corner of the axes
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
else:
x = l + x
if y<0:
y = t + y
else:
y = b + y
return x, y
elif s=='axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = self.axes.transAxes
return trans.transform_point((x, y))
def update_positions(self, renderer):
x, y = self.xytext
self._x, self._y = self._get_xy(x, y, self.textcoords)
x, y = self.xy
x, y = self._get_xy(x, y, self.xycoords)
ox0, oy0 = self._x, self._y
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
l,b,w,h = self.get_window_extent(renderer).bounds
r = l+w
t = b+h
xc = 0.5*(l+r)
yc = 0.5*(b+t)
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# Otherwise, fallback to YAArrow.
#if d.has_key("arrowstyle"):
if self.arrow_patch:
# adjust the starting point of the arrow relative to
# the textbox.
                # TODO: Rotation needs to be accounted for.
relpos = self._arrow_relpos
bbox = self.get_window_extent(renderer)
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
                # Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1,oy1))
mutation_scale = d.pop("mutation_scale", self.get_size())
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
if self._bbox_patch:
patchA = d.pop("patchA", self._bbox_patch)
self.arrow_patch.set_patchA(patchA)
else:
patchA = d.pop("patchA", self._bbox)
self.arrow_patch.set_patchA(patchA)
else:
# pick the x,y corner of the text bbox closest to point
# annotated
                dsu = [(abs(val-x0), val) for val in (l, r, xc)]
dsu.sort()
_, x = dsu[0]
                dsu = [(abs(val-y0), val) for val in (b, t, yc)]
dsu.sort()
_, y = dsu[0]
shrink = d.pop('shrink', 0.0)
theta = math.atan2(y-y0, x-x0)
r = math.sqrt((y-y0)**2. + (x-x0)**2.)
dx = shrink*r*math.cos(theta)
dy = shrink*r*math.sin(theta)
width = d.pop('width', 4)
headwidth = d.pop('headwidth', 12)
frac = d.pop('frac', 0.1)
self.arrow = YAArrow(self.figure, (x0+dx,y0+dy), (x-dx, y-dy),
width=width, headwidth=headwidth, frac=frac,
**d)
self.arrow.set_clip_box(self.get_clip_box())
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
self.update_positions(renderer)
self.update_bbox_position_size(renderer)
if self.arrow is not None:
if self.arrow.figure is None and self.figure is not None:
self.arrow.figure = self.figure
self.arrow.draw(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
Text.draw(self, renderer)
artist.kwdocd['Annotation'] = Annotation.__init__.__doc__
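# A minimal illustrative sketch of the coordinate systems and arrowprops options
# described in the Annotation.__init__ docstring above.  This is a hypothetical helper,
# not part of the original module; *ax* is assumed to be an existing matplotlib Axes,
# and end-user code would normally reach the same functionality through Axes.annotate.
def _annotation_sketch(ax):
    ann = Annotation('local max',
                     xy=(2.0, 1.0),             # point being annotated, in data coords
                     xycoords='data',
                     xytext=(30, -20),          # text offset: 30pt right, 20pt down
                     textcoords='offset points',
                     arrowprops=dict(arrowstyle='->',         # FancyArrowPatch branch
                                     connectionstyle='arc3'),
                     horizontalalignment='left',
                     verticalalignment='top')
    ax.add_artist(ann)
    return ann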
| agpl-3.0 | -2,341,165,572,429,736,400 | 33.207279 | 209 | 0.548079 | false |
kolypto/py-mailem | setup.py | 1 | 1208 | #!/usr/bin/env python
""" Slim, flexible, yet full-featured e-mailing library """
from setuptools import setup, find_packages
setup(
# http://pythonhosted.org/setuptools/setuptools.html
name='mailem',
version='0.0.5',
author='Mark Vartanyan',
author_email='[email protected]',
url='https://github.com/kolypto/py-mailem',
license='BSD',
description=__doc__,
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
keywords=['e-mail', 'mail', 'template'],
packages=find_packages(),
scripts=[],
entry_points={},
install_requires=[
'future',
],
extras_require={
},
include_package_data=True,
test_suite='nose.collector',
platforms='any',
classifiers=[
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| bsd-2-clause | -4,548,813,202,391,579,000 | 27.093023 | 71 | 0.620033 | false |
maaruiz/Nominas2015ES | Funciones/Datos/calendario_dat.py | 1 | 9946 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import calendar
import datetime
from Funciones.funs import ultimodiames, select_sql
from Funciones.Datos.contrato_dat import Contrato
from wstools.Utility import DOM
class Calendario:
"""
    Performs the working-day calculations needed between two dates.
    Two parameters must be passed:
    desde -> list containing the initial year, month and day
    desde = (anio, mes, dia)
    hasta -> list containing the final year, month and day
    hasta = (anio, mes, dia)
"""
def __init__(self, idcalendario, anio):
"""
        Initializes the variables that are needed:
        primero = first day (datetime.date variable)
        ultimo = last day (datetime.date variable)
        "desde" list:
        desdeanio = initial year (INT var)
        desdemes = month of the initial year (INT var)
        desdedia = day of the initial year (INT var)
        "hasta" list:
        hastaanio = final year (INT var)
        hastames = month of the final year (INT var)
        hastadia = day of the final year (INT var)
"""
self.calendario_id = idcalendario
self.anio = anio
self.desde(self.anio,1,1)
self.hasta(self.anio,12,31)
sql = ( "Select "
"A.idcalendario, A.descripcion, A.idmunicipio, "
"B.anio, B.mes, B.dia, B.idcal_festivo, "
"B.esfestivo_nnal, B.esfestivo_reg, B.esfestivo_loc, "
"B.esfestivo_convenio "
"From "
"calendario A "
"inner join "
"cal_festivos B "
"on A.idcalendario = B.idcalendario "
"Where "
"A.idcalendario = %s "
"and B.anio = %s;")
self.diasfestivos = select_sql((sql,(self.calendario_id, self.anio)),1)
self.totalanios = self.hastaanio - self.desdeanio
self.nolaboral()
def __call__(self, idcalendario, anio):
self.__init__(idcalendario, anio)
def desde(self, anio, mes, dia):
self.desdeanio = anio
self.desdemes = mes
self.desdedia = dia
self.primero = datetime.date(self.desdeanio, self.desdemes, self.desdedia)
return anio, mes, dia
def hasta(self, anio, mes, dia):
self.hastaanio = anio
self.hastames = mes
self.hastadia = dia
self.ultimo = datetime.date(self.hastaanio, self.hastames, self.hastadia)
return self.hastaanio, self.hastames, self.hastadia
def nolaboral(self):
"""
        Counts the number of weekend days and holidays between the given dates and
        returns the values (sab, dom, fes)
"""
sab = 0
dom = 0
fes = 0
for xanio in range(self.desdeanio, self.hastaanio + 1):
if xanio < self.hastaanio and xanio == self.desdeanio:
for xmes in range(self.desdemes, 12 + 1):
if xmes == self.desdemes:
sab = sab + self.diasemana_delmes(xanio, xmes, self.desdedia,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, self.desdedia,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio,
xmes, self.desdedia, self.hastadia)
else :
sab = sab + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id,xanio, xmes, 1,
ultimodiames(xmes, xanio))
elif self.hastaanio > xanio > self.desdeanio:
for xmes in range(1,12+1):
sab = sab + self.diasemana_delmes(xanio, xmes,
1, ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes,
1, ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio, xmes,
1, ultimodiames(xmes, xanio))
elif xanio == self.hastaanio and xanio > self.desdeanio:
for xmes in range(1, self.hastames + 1):
if xmes == self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, 1, self.hastadia, 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1, self.hastadia, 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio,
xmes, 1, self.hastadia)
else:
sab = sab + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio,
xmes, 1, ultimodiames(xmes, xanio))
elif xanio == self.hastaanio and xanio == self.desdeanio:
for xmes in range(self.desdemes, self.hastames + 1):
if xmes == self.desdemes and xmes < self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, self.desdedia,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, self.desdedia,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio,
xmes, self.desdedia,
ultimodiames(xmes, xanio))
elif self.desdemes < xmes < self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio, xmes,
1, ultimodiames(xmes, xanio))
elif xmes > self.desdemes and xmes == self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, 1, self.hastadia, 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1, self.hastadia, 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio, xmes,
1, self.hastadia)
elif xmes == self.desdemes and xmes == self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, self.desdedia,
self.hastadia, 5)
dom = dom + self.diasemana_delmes(xanio, xmes, self.desdedia,
self.hastadia, 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio, xmes, self.desdedia,
self.hastadia)
self.totaldomingos = dom
self.totalsabados = sab
self.totalfestivos = fes
self.diastotales = (self.ultimo - self.primero).days + 1
self.totalefectivos = self.diastotales - self.totalsabados - self.totaldomingos - self.totalfestivos
return sab,dom, fes
def festivosdelmes(self, calendario, anio, mes, desdedia, hastadia):
"""
        Counts the number of holidays in a month, taking into account the
        given dates.
        The parameters to pass are of type INT:
        Dias.festivosdelmes(calendario, anio, mes, desdedia, hastadia)
        The holidays must be supplied by an external calendar.
"""
sql = ( "Select "
"count(*) "
"From "
"cal_festivos "
"Where "
"idcalendario = %s "
"and anio = %s "
"and mes = %s "
"and dia >= %s "
"and dia <= %s "
"Group by "
"idcalendario;")
dato = 0
try:
dato = select_sql((sql, (calendario, anio, mes, desdedia, hastadia)))[0]
except:
pass
return dato
def diasemana_delmes(self, anio, mes, desdedia, hastadia, diasemana):
"""
        Counts the occurrences of a given weekday between the dates
        0 = Monday
        1 = Tuesday
        2 = Wednesday
        3 = Thursday
        4 = Friday
        5 = Saturday
        6 = Sunday
"""
calmes = calendar.Calendar().monthdays2calendar(anio, mes)
x = 0
for c in calmes:
if desdedia <= c[diasemana][0] <= hastadia:
x += 1
return x
def dias_entre_fechas(self):
contrato = Contrato()
dia_final = self.hasta(anio, mes, dia) | gpl-3.0 | -3,972,020,743,834,412,000 | 46.574163 | 108 | 0.476363 | false |
Valloric/ycmd | ycmd/tests/go/debug_info_test.py | 1 | 3586 | # Copyright (C) 2016-2017 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import ( assert_that,
contains,
has_entries,
has_entry,
instance_of )
from ycmd.tests.go import ( IsolatedYcmd,
PathToTestFile,
SharedYcmd,
StartGoCompleterServerInDirectory )
from ycmd.tests.test_utils import BuildRequest
@SharedYcmd
def DebugInfo_test( app ):
request_data = BuildRequest( filetype = 'go' )
assert_that(
app.post_json( '/debug_info', request_data ).json,
has_entry( 'completer', has_entries( {
'name': 'Go',
'servers': contains( has_entries( {
'name': 'gopls',
'is_running': instance_of( bool ),
'executable': contains( instance_of( str ),
instance_of( str ),
instance_of( str ),
instance_of( str ) ),
'address': None,
'port': None,
'pid': instance_of( int ),
'logfiles': contains( instance_of( str ) ),
'extras': contains(
has_entries( {
'key': 'Server State',
'value': instance_of( str ),
} ),
has_entries( {
'key': 'Project Directory',
'value': PathToTestFile(),
} ),
has_entries( {
'key': 'Settings',
'value': '{}'
} ),
)
} ) ),
} ) )
)
@IsolatedYcmd
def DebugInfo_ProjectDirectory_test( app ):
project_dir = PathToTestFile( 'td' )
StartGoCompleterServerInDirectory( app, project_dir )
assert_that(
app.post_json( '/debug_info', BuildRequest( filetype = 'go' ) ).json,
has_entry( 'completer', has_entries( {
'name': 'Go',
'servers': contains( has_entries( {
'name': 'gopls',
'is_running': instance_of( bool ),
'executable': contains( instance_of( str ),
instance_of( str ),
instance_of( str ),
instance_of( str ) ),
'address': None,
'port': None,
'pid': instance_of( int ),
'logfiles': contains( instance_of( str ) ),
'extras': contains(
has_entries( {
'key': 'Server State',
'value': instance_of( str ),
} ),
has_entries( {
'key': 'Project Directory',
'value': PathToTestFile(),
} ),
has_entries( {
'key': 'Settings',
'value': '{}'
} ),
)
} ) ),
} ) )
)
| gpl-3.0 | -7,875,975,041,765,725,000 | 31.6 | 73 | 0.528444 | false |
ATRAN2/fureon | backend/tests/test_site_controls.py | 1 | 4705 | import mock
import time
from fureon import db_operations, config, site_controls
from fureon.models import stream_playlist, song
from tests import testing_utils
class TestMainStreamControls(testing_utils.TestingWithDBBaseClass):
@mock.patch('fureon.utils.stream_player.StreamPlayer.update')
def test_load_song_library(self, mock_stream_player_update):
with mock.patch.object(config, 'paths', testing_utils.MOCK_CONFIG_PATHS):
self._stream_controller.load_song_library()
with db_operations.session_scope() as session:
song_manager = song.SongManager(session)
assert 3 == song_manager.get_song_count()
assert mock_stream_player_update.called
def test_add_song_with_user_request_to_playlist(self):
with db_operations.session_scope() as session:
test_songs = session.query(song.Song).all()
self._stream_controller.add_song_with_user_request_to_playlist(
test_songs[0].id, user_requested=False
)
assert 1 == session.query(stream_playlist.Playlist.id).count()
self._stream_controller.add_song_with_user_request_to_playlist(
test_songs[1].id, user_requested=True
)
assert 2 == session.query(stream_playlist.Playlist.id).count()
first_song = session.query(stream_playlist.Playlist).filter_by(id=1).one()
assert first_song.user_requested is False
second_song = session.query(stream_playlist.Playlist).filter_by(id=2).one()
assert second_song.user_requested is True
def test_add_random_song_with_user_request_to_playlist(self):
self._stream_controller.add_random_song_with_user_request_to_playlist()
with db_operations.session_scope() as session:
assert 1 == session.query(stream_playlist.Playlist).count()
def test_add_random_songs_to_playlist_until_max_length(self):
with db_operations.session_scope() as session:
playlist_manager = stream_playlist.PlaylistManager(session)
assert 0 == playlist_manager.get_playlist_length()
self._stream_controller.add_random_songs_to_playlist_until_max_length()
playlist_size = config.stream_options['playlist_size']
assert playlist_size == playlist_manager.get_playlist_length()
@mock.patch('fureon.utils.stream_player.StreamPlayer.crop')
@mock.patch('fureon.utils.stream_player.StreamPlayer.add')
def test_transition_to_next_song(self, mock_add, mock_crop):
with db_operations.session_scope() as session:
playlist_manager = stream_playlist.PlaylistManager(session)
prev_playlist = playlist_manager.get_ordered_playlist()
self._stream_controller.transition_to_next_song()
after_playlist = playlist_manager.get_ordered_playlist()
assert prev_playlist != after_playlist
self._stream_controller.transition_to_next_song()
assert mock_add.called
assert mock_crop.called
@testing_utils.retry_test_n_times(2)
@mock.patch('fureon.components.stream_watcher.StreamPlayerWatcher.run')
@mock.patch('fureon.utils.stream_player.StreamPlayer.add')
@mock.patch('fureon.utils.stream_player.StreamPlayer.crop')
@mock.patch('fureon.utils.stream_player.StreamPlayer.clear')
@mock.patch('fureon.utils.stream_player.StreamPlayer.update')
def test_initialize_stream(self, mock_update, mock_clear, mock_crop, mock_add, mock_run):
with mock.patch.object(config, 'paths', testing_utils.MOCK_CONFIG_PATHS):
self._stream_controller.initialize_stream()
time.sleep(0.2)
assert mock_update.called
assert mock_clear.called
assert mock_crop.called
assert mock_add.called
assert mock_run.called
#
# @mock.patch('fureon.utils.stream_player.StreamPlayer.play')
# def test_run_stream(self, mock_play):
# self._stream_controller.run_stream()
# assert True == mock_play.called
class TestDatabaseControls(object):
@classmethod
def setup_class(cls):
cls._database_controller = site_controls.DatabaseControls()
def setup_method(self, method):
testing_utils.connect_to_temporary_test_db()
def teardown_method(self, method):
testing_utils.empty_temp_directory()
def test_update_song_db(self):
with mock.patch.object(config, 'paths', testing_utils.MOCK_CONFIG_PATHS):
self._database_controller.update_song_db()
with db_operations.session_scope() as session:
all_songs = session.query(song.Song).all()
assert 3 == len(all_songs)
| apache-2.0 | -8,906,072,735,576,366,000 | 46.05 | 94 | 0.673539 | false |
qdqmedia/wiggum | wiggum/users/migrations/0011_auto_20151023_1228.py | 1 | 1421 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
import django.utils.timezone
def gen_uuid(apps, schema_editor):
UserModel = apps.get_model('users', 'User')
for row in UserModel.objects.all():
row.sfa_token = uuid.uuid4()
row.save()
class Migration(migrations.Migration):
dependencies = [
('users', '0010_auto_wiggum_permission_data'),
]
operations = [
migrations.AddField(
model_name='user',
name='sfa_token',
field=models.UUIDField(null=True, editable=False, unique=True, verbose_name='SFA token'),
),
migrations.RunPython(gen_uuid, reverse_code=migrations.RunPython.noop),
migrations.AlterField(
model_name='user',
name='sfa_token',
field=models.UUIDField(editable=False, unique=True, verbose_name='SFA token'),
),
migrations.AddField(
model_name='user',
name='sfa_token_expire',
field=models.DateTimeField(verbose_name='SFA token expiration', default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='user',
name='password_reset_token_expire',
field=models.DateTimeField(verbose_name='Password reset token expiration', default=django.utils.timezone.now),
),
]
| bsd-3-clause | 5,595,121,689,004,847,000 | 29.891304 | 122 | 0.613652 | false |
rec/echomesh | code/python/external/pi3d/constants/gl.py | 1 | 9181 | """
This module contains integer constants from a C header file named something
like gl.h.
"""
GL_DEPTH_BUFFER_BIT = 0x00000100
GL_STENCIL_BUFFER_BIT = 0x00000400
GL_COLOR_BUFFER_BIT = 0x00004000
GL_POINTS = 0x0000
GL_LINES = 0x0001
GL_LINE_LOOP = 0x0002
GL_LINE_STRIP = 0x0003
GL_TRIANGLES = 0x0004
GL_TRIANGLE_STRIP = 0x0005
GL_TRIANGLE_FAN = 0x0006
GL_NEVER = 0x0200
GL_LESS = 0x0201
GL_EQUAL = 0x0202
GL_LEQUAL = 0x0203
GL_GREATER = 0x0204
GL_NOTEQUAL = 0x0205
GL_GEQUAL = 0x0206
GL_ALWAYS = 0x0207
GL_SRC_COLOR = 0x0300
GL_ONE_MINUS_SRC_COLOR = 0x0301
GL_SRC_ALPHA = 0x0302
GL_ONE_MINUS_SRC_ALPHA = 0x0303
GL_DST_ALPHA = 0x0304
GL_ONE_MINUS_DST_ALPHA = 0x0305
GL_DST_COLOR = 0x0306
GL_ONE_MINUS_DST_COLOR = 0x0307
GL_SRC_ALPHA_SATURATE = 0x0308
GL_CLIP_PLANE0 = 0x3000
GL_CLIP_PLANE1 = 0x3001
GL_CLIP_PLANE2 = 0x3002
GL_CLIP_PLANE3 = 0x3003
GL_CLIP_PLANE4 = 0x3004
GL_CLIP_PLANE5 = 0x3005
GL_FRONT = 0x0404
GL_BACK = 0x0405
GL_FRONT_AND_BACK = 0x0408
GL_FOG = 0x0B60
GL_LIGHTING = 0x0B50
GL_TEXTURE_2D = 0x0DE1
GL_CULL_FACE = 0x0B44
GL_ALPHA_TEST = 0x0BC0
GL_BLEND = 0x0BE2
GL_COLOR_LOGIC_OP = 0x0BF2
GL_DITHER = 0x0BD0
GL_STENCIL_TEST = 0x0B90
GL_DEPTH_TEST = 0x0B71
GL_POINT_SMOOTH = 0x0B10
GL_LINE_SMOOTH = 0x0B20
GL_SCISSOR_TEST = 0x0C11
GL_COLOR_MATERIAL = 0x0B57
GL_NORMALIZE = 0x0BA1
GL_RESCALE_NORMAL = 0x803A
GL_POLYGON_OFFSET_FILL = 0x8037
GL_VERTEX_ARRAY = 0x8074
GL_NORMAL_ARRAY = 0x8075
GL_COLOR_ARRAY = 0x8076
GL_TEXTURE_COORD_ARRAY = 0x8078
GL_MULTISAMPLE = 0x809D
GL_SAMPLE_ALPHA_TO_COVERAGE = 0x809E
GL_SAMPLE_ALPHA_TO_ONE = 0x809F
GL_SAMPLE_COVERAGE = 0x80A0
GL_NO_ERROR = 0
GL_INVALID_ENUM = 0x0500
GL_INVALID_VALUE = 0x0501
GL_INVALID_OPERATION = 0x0502
GL_STACK_OVERFLOW = 0x0503
GL_STACK_UNDERFLOW = 0x0504
GL_OUT_OF_MEMORY = 0x0505
GL_INVALID_FRAMEBUFFER_OPERATION = 0x0506
GL_EXP = 0x0800
GL_EXP2 = 0x0801
GL_FOG_DENSITY = 0x0B62
GL_FOG_START = 0x0B63
GL_FOG_END = 0x0B64
GL_FOG_MODE = 0x0B65
GL_FOG_COLOR = 0x0B66
GL_CW = 0x0900
GL_CCW = 0x0901
GL_CURRENT_COLOR = 0x0B00
GL_CURRENT_NORMAL = 0x0B02
GL_CURRENT_TEXTURE_COORDS = 0x0B03
GL_POINT_SIZE = 0x0B11
GL_POINT_SIZE_MIN = 0x8126
GL_POINT_SIZE_MAX = 0x8127
GL_POINT_FADE_THRESHOLD_SIZE = 0x8128
GL_POINT_DISTANCE_ATTENUATION = 0x8129
GL_SMOOTH_POINT_SIZE_RANGE = 0x0B12
GL_LINE_WIDTH = 0x0B21
GL_SMOOTH_LINE_WIDTH_RANGE = 0x0B22
GL_ALIASED_POINT_SIZE_RANGE = 0x846D
GL_ALIASED_LINE_WIDTH_RANGE = 0x846E
GL_CULL_FACE_MODE = 0x0B45
GL_FRONT_FACE = 0x0B46
GL_SHADE_MODEL = 0x0B54
GL_DEPTH_RANGE = 0x0B70
GL_DEPTH_WRITEMASK = 0x0B72
GL_DEPTH_CLEAR_VALUE = 0x0B73
GL_DEPTH_FUNC = 0x0B74
GL_STENCIL_CLEAR_VALUE = 0x0B91
GL_STENCIL_FUNC = 0x0B92
GL_STENCIL_VALUE_MASK = 0x0B93
GL_STENCIL_FAIL = 0x0B94
GL_STENCIL_PASS_DEPTH_FAIL = 0x0B95
GL_STENCIL_PASS_DEPTH_PASS = 0x0B96
GL_STENCIL_REF = 0x0B97
GL_STENCIL_WRITEMASK = 0x0B98
GL_MATRIX_MODE = 0x0BA0
GL_VIEWPORT = 0x0BA2
GL_MODELVIEW_STACK_DEPTH = 0x0BA3
GL_PROJECTION_STACK_DEPTH = 0x0BA4
GL_TEXTURE_STACK_DEPTH = 0x0BA5
GL_MODELVIEW_MATRIX = 0x0BA6
GL_PROJECTION_MATRIX = 0x0BA7
GL_TEXTURE_MATRIX = 0x0BA8
GL_ALPHA_TEST_FUNC = 0x0BC1
GL_ALPHA_TEST_REF = 0x0BC2
GL_BLEND_DST = 0x0BE0
GL_BLEND_SRC = 0x0BE1
GL_LOGIC_OP_MODE = 0x0BF0
GL_SCISSOR_BOX = 0x0C10
GL_SCISSOR_TEST = 0x0C11
GL_COLOR_CLEAR_VALUE = 0x0C22
GL_COLOR_WRITEMASK = 0x0C23
GL_UNPACK_ALIGNMENT = 0x0CF5
GL_PACK_ALIGNMENT = 0x0D05
GL_MAX_LIGHTS = 0x0D31
GL_MAX_CLIP_PLANES = 0x0D32
GL_MAX_TEXTURE_SIZE = 0x0D33
GL_MAX_MODELVIEW_STACK_DEPTH = 0x0D36
GL_MAX_PROJECTION_STACK_DEPTH = 0x0D38
GL_MAX_TEXTURE_STACK_DEPTH = 0x0D39
GL_MAX_VIEWPORT_DIMS = 0x0D3A
GL_MAX_TEXTURE_UNITS = 0x84E2
GL_SUBPIXEL_BITS = 0x0D50
GL_RED_BITS = 0x0D52
GL_GREEN_BITS = 0x0D53
GL_BLUE_BITS = 0x0D54
GL_ALPHA_BITS = 0x0D55
GL_DEPTH_BITS = 0x0D56
GL_STENCIL_BITS = 0x0D57
GL_POLYGON_OFFSET_UNITS = 0x2A00
GL_POLYGON_OFFSET_FILL = 0x8037
GL_POLYGON_OFFSET_FACTOR = 0x8038
GL_TEXTURE_BINDING_2D = 0x8069
GL_VERTEX_ARRAY_SIZE = 0x807A
GL_VERTEX_ARRAY_TYPE = 0x807B
GL_VERTEX_ARRAY_STRIDE = 0x807C
GL_NORMAL_ARRAY_TYPE = 0x807E
GL_NORMAL_ARRAY_STRIDE = 0x807F
GL_COLOR_ARRAY_SIZE = 0x8081
GL_COLOR_ARRAY_TYPE = 0x8082
GL_COLOR_ARRAY_STRIDE = 0x8083
GL_TEXTURE_COORD_ARRAY_SIZE = 0x8088
GL_TEXTURE_COORD_ARRAY_TYPE = 0x8089
GL_TEXTURE_COORD_ARRAY_STRIDE = 0x808A
GL_VERTEX_ARRAY_POINTER = 0x808E
GL_NORMAL_ARRAY_POINTER = 0x808F
GL_COLOR_ARRAY_POINTER = 0x8090
GL_TEXTURE_COORD_ARRAY_POINTER = 0x8092
GL_SAMPLE_BUFFERS = 0x80A8
GL_SAMPLES = 0x80A9
GL_SAMPLE_COVERAGE_VALUE = 0x80AA
GL_SAMPLE_COVERAGE_INVERT = 0x80AB
GL_NUM_COMPRESSED_TEXTURE_FORMATS = 0x86A2
GL_COMPRESSED_TEXTURE_FORMATS = 0x86A3
GL_DONT_CARE = 0x1100
GL_FASTEST = 0x1101
GL_NICEST = 0x1102
GL_PERSPECTIVE_CORRECTION_HINT = 0x0C50
GL_POINT_SMOOTH_HINT = 0x0C51
GL_LINE_SMOOTH_HINT = 0x0C52
GL_FOG_HINT = 0x0C54
GL_GENERATE_MIPMAP_HINT = 0x8192
GL_LIGHT_MODEL_AMBIENT = 0x0B53
GL_LIGHT_MODEL_TWO_SIDE = 0x0B52
GL_AMBIENT = 0x1200
GL_DIFFUSE = 0x1201
GL_SPECULAR = 0x1202
GL_POSITION = 0x1203
GL_SPOT_DIRECTION = 0x1204
GL_SPOT_EXPONENT = 0x1205
GL_SPOT_CUTOFF = 0x1206
GL_CONSTANT_ATTENUATION = 0x1207
GL_LINEAR_ATTENUATION = 0x1208
GL_QUADRATIC_ATTENUATION = 0x1209
GL_BYTE = 0x1400
GL_UNSIGNED_BYTE = 0x1401
GL_SHORT = 0x1402
GL_UNSIGNED_SHORT = 0x1403
GL_FLOAT = 0x1406
GL_FIXED = 0x140C
GL_CLEAR = 0x1500
GL_AND = 0x1501
GL_AND_REVERSE = 0x1502
GL_COPY = 0x1503
GL_AND_INVERTED = 0x1504
GL_NOOP = 0x1505
GL_XOR = 0x1506
GL_OR = 0x1507
GL_NOR = 0x1508
GL_EQUIV = 0x1509
GL_INVERT = 0x150A
GL_OR_REVERSE = 0x150B
GL_COPY_INVERTED = 0x150C
GL_OR_INVERTED = 0x150D
GL_NAND = 0x150E
GL_SET = 0x150F
GL_EMISSION = 0x1600
GL_SHININESS = 0x1601
GL_AMBIENT_AND_DIFFUSE = 0x1602
GL_MODELVIEW = 0x1700
GL_PROJECTION = 0x1701
GL_TEXTURE = 0x1702
GL_ALPHA = 0x1906
GL_RGB = 0x1907
GL_RGBA = 0x1908
GL_LUMINANCE = 0x1909
GL_LUMINANCE_ALPHA = 0x190A
GL_UNPACK_ALIGNMENT = 0x0CF5
GL_PACK_ALIGNMENT = 0x0D05
GL_UNSIGNED_SHORT_4_4_4_4 = 0x8033
GL_UNSIGNED_SHORT_5_5_5_1 = 0x8034
GL_UNSIGNED_SHORT_5_6_5 = 0x8363
GL_FLAT = 0x1D00
GL_SMOOTH = 0x1D01
GL_KEEP = 0x1E00
GL_REPLACE = 0x1E01
GL_INCR = 0x1E02
GL_DECR = 0x1E03
GL_VENDOR = 0x1F00
GL_RENDERER = 0x1F01
GL_VERSION = 0x1F02
GL_EXTENSIONS = 0x1F03
GL_MODULATE = 0x2100
GL_DECAL = 0x2101
GL_ADD = 0x0104
GL_TEXTURE_ENV_MODE = 0x2200
GL_TEXTURE_ENV_COLOR = 0x2201
GL_TEXTURE_ENV = 0x2300
GL_NEAREST = 0x2600
GL_LINEAR = 0x2601
GL_NEAREST_MIPMAP_NEAREST = 0x2700
GL_LINEAR_MIPMAP_NEAREST = 0x2701
GL_NEAREST_MIPMAP_LINEAR = 0x2702
GL_LINEAR_MIPMAP_LINEAR = 0x2703
GL_TEXTURE_MAG_FILTER = 0x2800
GL_TEXTURE_MIN_FILTER = 0x2801
GL_TEXTURE_WRAP_S = 0x2802
GL_TEXTURE_WRAP_T = 0x2803
GL_GENERATE_MIPMAP = 0x8191
GL_TEXTURE0 = 0x84C0
GL_TEXTURE1 = 0x84C1
GL_TEXTURE2 = 0x84C2
GL_TEXTURE3 = 0x84C3
GL_TEXTURE4 = 0x84C4
GL_TEXTURE5 = 0x84C5
GL_TEXTURE6 = 0x84C6
GL_TEXTURE7 = 0x84C7
GL_TEXTURE8 = 0x84C8
GL_TEXTURE9 = 0x84C9
GL_TEXTURE10 = 0x84CA
GL_TEXTURE11 = 0x84CB
GL_TEXTURE12 = 0x84CC
GL_TEXTURE13 = 0x84CD
GL_TEXTURE14 = 0x84CE
GL_TEXTURE15 = 0x84CF
GL_TEXTURE16 = 0x84D0
GL_TEXTURE17 = 0x84D1
GL_TEXTURE18 = 0x84D2
GL_TEXTURE19 = 0x84D3
GL_TEXTURE20 = 0x84D4
GL_TEXTURE21 = 0x84D5
GL_TEXTURE22 = 0x84D6
GL_TEXTURE23 = 0x84D7
GL_TEXTURE24 = 0x84D8
GL_TEXTURE25 = 0x84D9
GL_TEXTURE26 = 0x84DA
GL_TEXTURE27 = 0x84DB
GL_TEXTURE28 = 0x84DC
GL_TEXTURE29 = 0x84DD
GL_TEXTURE30 = 0x84DE
GL_TEXTURE31 = 0x84DF
GL_ACTIVE_TEXTURE = 0x84E0
GL_CLIENT_ACTIVE_TEXTURE = 0x84E1
GL_REPEAT = 0x2901
GL_CLAMP_TO_EDGE = 0x812F
GL_LIGHT0 = 0x4000
GL_LIGHT1 = 0x4001
GL_LIGHT2 = 0x4002
GL_LIGHT3 = 0x4003
GL_LIGHT4 = 0x4004
GL_LIGHT5 = 0x4005
GL_LIGHT6 = 0x4006
GL_LIGHT7 = 0x4007
GL_ARRAY_BUFFER = 0x8892
GL_ELEMENT_ARRAY_BUFFER = 0x8893
GL_ARRAY_BUFFER_BINDING = 0x8894
GL_ELEMENT_ARRAY_BUFFER_BINDING = 0x8895
GL_VERTEX_ARRAY_BUFFER_BINDING = 0x8896
GL_NORMAL_ARRAY_BUFFER_BINDING = 0x8897
GL_COLOR_ARRAY_BUFFER_BINDING = 0x8898
GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING = 0x889A
GL_STATIC_DRAW = 0x88E4
GL_DYNAMIC_DRAW = 0x88E8
GL_BUFFER_SIZE = 0x8764
GL_BUFFER_USAGE = 0x8765
GL_SUBTRACT = 0x84E7
GL_COMBINE = 0x8570
GL_COMBINE_RGB = 0x8571
GL_COMBINE_ALPHA = 0x8572
GL_RGB_SCALE = 0x8573
GL_ADD_SIGNED = 0x8574
GL_INTERPOLATE = 0x8575
GL_CONSTANT = 0x8576
GL_PRIMARY_COLOR = 0x8577
GL_PREVIOUS = 0x8578
GL_OPERAND0_RGB = 0x8590
GL_OPERAND1_RGB = 0x8591
GL_OPERAND2_RGB = 0x8592
GL_OPERAND0_ALPHA = 0x8598
GL_OPERAND1_ALPHA = 0x8599
GL_OPERAND2_ALPHA = 0x859A
GL_ALPHA_SCALE = 0x0D1C
GL_SRC0_RGB = 0x8580
GL_SRC1_RGB = 0x8581
GL_SRC2_RGB = 0x8582
GL_SRC0_ALPHA = 0x8588
GL_SRC1_ALPHA = 0x8589
GL_SRC2_ALPHA = 0x858A
GL_DOT3_RGB = 0x86AE
GL_DOT3_RGBA = 0x86AF
GL_IMPLEMENTATION_COLOR_READ_TYPE_OES = 0x8B9A
GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES = 0x8B9B
GL_PALETTE4_RGB8_OES = 0x8B90
GL_PALETTE4_RGBA8_OES = 0x8B91
GL_PALETTE4_R5_G6_B5_OES = 0x8B92
GL_PALETTE4_RGBA4_OES = 0x8B93
GL_PALETTE4_RGB5_A1_OES = 0x8B94
GL_PALETTE8_RGB8_OES = 0x8B95
GL_PALETTE8_RGBA8_OES = 0x8B96
GL_PALETTE8_R5_G6_B5_OES = 0x8B97
GL_PALETTE8_RGBA4_OES = 0x8B98
GL_PALETTE8_RGB5_A1_OES = 0x8B99
GL_POINT_SIZE_ARRAY_OES = 0x8B9C
GL_POINT_SIZE_ARRAY_TYPE_OES = 0x898A
GL_POINT_SIZE_ARRAY_STRIDE_OES = 0x898B
GL_POINT_SIZE_ARRAY_POINTER_OES = 0x898C
GL_POINT_SIZE_ARRAY_BUFFER_BINDING_OES = 0x8B9F
GL_POINT_SPRITE_OES = 0x8861
GL_COORD_REPLACE_OES = 0x8862
| mit | -6,828,736,053,517,464,000 | 24.861972 | 75 | 0.762444 | false |
wroberts/fsed | setup.py | 1 | 4720 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
with open(path.join(HERE, 'fsed', 'VERSION'), encoding='utf-8') as f:
VERSION = f.read().strip()
setup(
name='fsed',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=VERSION,
description='Aho-Corasick string replacement utility',
long_description=LONG_DESCRIPTION,
# The project's main homepage.
url='https://github.com/wroberts/fsed',
# Author details
author='Will Roberts',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Environment :: Console',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Software Development :: Pre-processors',
'Topic :: Text Processing :: Filters',
'Topic :: Text Processing :: Indexing',
'Topic :: Text Processing :: Linguistic',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='string search replace rewrite',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['click'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'fsed': ['VERSION'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'fsed=fsed.fsed:main',
],
},
test_suite='nose.collector',
tests_require=['nose'],
)
| mit | -8,081,488,749,389,250,000 | 34.223881 | 94 | 0.650636 | false |
fschimpf/graf2il | GUI/sekvens.py | 1 | 1815 | import sys
sys.path.append('../')
from PyQt4 import QtGui, QtCore
from sekvens_gui import Ui_MainWindow
import graf2il
class MeinProgramm(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self):
QtGui.QWidget.__init__(self)
self.setupUi(self)
        self.XMLFileToCompileName = 'test'  # default until the user picks a file
# connect slots for GUI-elements
self.connect(self.pushButton_2, QtCore.SIGNAL('clicked (bool)'), self.start_graf2il)
self.connect(self.pushButton_3, QtCore.SIGNAL('clicked (bool)'), self.chooseInFile)
self.statusBar().showMessage('Malvik VGS - Fritz Schimpf')
def start_graf2il(self):
self.statusBar().showMessage('Script graf2il aktiv')
# start graf2il with correct setting for target language
if self.comboBox.currentIndex() == 0:
graf2il.main(self.XMLFileToCompileName,'awl')
elif self.comboBox.currentIndex() == 1:
graf2il.main(self.XMLFileToCompileName,'scl')
self.statusBar().showMessage('Malvik VGS - Fritz Schimpf')
def chooseInFile(self):
self.statusBar().showMessage('Velg inputfil')
self.XMLFileToCompileName = str(QtGui.QFileDialog.getOpenFileName(self, 'Velg fil som skal bli oversatt', '', 'JGrafchart XML-filer (*.xml);;alle filer (*.*)'))
self.lineEdit.setText(self.XMLFileToCompileName)
self.statusBar().showMessage('Malvik VGS - Fritz Schimpf')
app = QtGui.QApplication(sys.argv)
#locale = QtCore.QLocale.system().name()
locale = "nb_NO"
#locale = "de_DE"
#locale = "en_EN"
#print (locale)
translator = QtCore.QTranslator()
if translator.load("translation_" + locale, "./"):
app.installTranslator(translator)
programm = MeinProgramm()
programm.show()
sys.exit(app.exec_()) # infinite loop
| gpl-3.0 | -6,599,197,859,360,954,000 | 34.588235 | 168 | 0.660606 | false |
scwuaptx/CTF | 2017-writeup/secuinside/bugsystem.py | 1 | 1764 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import *
#host = "10.211.55.6"
#port = 8888
host = "13.112.128.199"
port = 1337
for i in range(10):
r = remote(host,port)
def add(name,size,content):
r.recvuntil(":")
r.sendline("1")
r.recvuntil(":")
r.sendline(name)
r.recvuntil(":")
r.sendline(str(size))
r.recvuntil(":")
r.send(content)
def edit(idx,types,value,data):
r.recvuntil(":")
r.sendline("3")
r.recvuntil(":")
r.sendline(str(idx))
r.recvuntil(":")
r.sendline(str(types))
r.recvuntil(":")
r.sendline(str(value))
r.recvuntil(":")
r.send(data)
def view():
r.recvuntil(":")
r.sendline("2")
def delbug(idx):
r.recvuntil(":")
r.sendline("4")
r.recvuntil(":")
r.sendline(str(idx))
add("dada",128,"nogg") #1
add("dada",128,"gogo") #2
delbug(1)
add("dada",32,"fuck") #3
view()
r.recvuntil("fuck")
data = r.recvuntil("\n")[:-1]
if len(data) < 4 :
r.close()
continue
libc = u32(data) - 0x1b07f0
print hex(libc)
add("da",32,"sh\x00") #4
add("da",32,"sh\x00") #5
delbug(0)
delbug(3)
delbug(4)
delbug(5)
add("ora",32,"lays")
view()
r.recvuntil("lays")
data = r.recvuntil("\n")[:-1]
if len(data) < 4 :
r.close()
continue
heap = u32(data) - 0x40
print hex(heap)
obj = heap + 0x178
free_hook = libc +0x1b18b0
system = libc + 0x3a940
off = free_hook - obj - 0x100000000
add("sh\x00",0x21000,"/bin/sh\x00")
edit(2,3,off,p32(system))
delbug(7)
r.interactive()
| gpl-2.0 | -8,374,999,728,743,337,000 | 19.511628 | 39 | 0.494331 | false |
yelu/leetcode | DP/UniquePathsII.py | 1 | 1063 | class Solution:
# @param obstacleGrid, a list of lists of integers
# @return an integer
# [[0 for j in range(len(obstacleGrid[i]))]
# for i in range(len(obstacleGrid))]
    # Recurrence: F[i][j] = F[i-1][j] + F[i][j-1]; F[i][j] = 0 if i < 0, j < 0, or cell (i, j) is an obstacle
def uniquePathsWithObstacles(self, obstacleGrid):
res = [[0 for j in range(len(obstacleGrid[i]))]
for i in range(len(obstacleGrid))]
for i in range(len(obstacleGrid)) :
for j in range(len(obstacleGrid[i])) :
if not i and not j and not obstacleGrid[0][0]: res[0][0] = 1; continue
if not obstacleGrid[i][j] :
res[i][j] = (0 if i-1<0 else res[i-1][j])+\
(0 if j-1<0 else res[i][j-1])
# for i in range(len(obstacleGrid)) :
# for j in range(len(obstacleGrid[0])) :
# print res[i][j]
return res[len(obstacleGrid)-1][len(obstacleGrid[0])-1]
ob = [
[0,0,0],
[0,1,0],
[0,0,0]
]
sol = Solution()
print sol.uniquePathsWithObstacles(ob)
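# An additional hand-checkable case (illustrative addition): with the obstacle at
# (0, 1) the only route is down-then-right, so the recurrence should yield 1.
ob2 = [
    [0,1],
    [0,0]
]
print sol.uniquePathsWithObstacles(ob2)   # expected output: 1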
| gpl-2.0 | 2,702,322,879,902,214,700 | 36.964286 | 86 | 0.528692 | false |
openstack/mistral | mistral/tests/unit/engine/test_error_result.py | 1 | 6921 | # Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import data_flow
from mistral.workflow import states
from mistral_lib import actions as actions_base
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
WF = """
---
version: '2.0'
wf:
input:
- success_result
- error_result
tasks:
task1:
action: {action_name}
input:
success_result: <% $.success_result %>
error_result: <% $.error_result %>
publish:
p_var: <% task(task1).result %>
on-error:
- task2: <% task(task1).result = 2 %>
- task3: <% task(task1).result = 3 %>
task2:
action: std.noop
task3:
action: std.noop
"""
class MyAction(actions_base.Action):
def __init__(self, success_result, error_result):
self.success_result = success_result
self.error_result = error_result
def run(self, context):
return actions_base.Result(
data=self.success_result,
error=self.error_result
)
def test(self):
raise NotImplementedError
class MyAsyncAction(MyAction):
def is_sync(self):
return False
class ErrorResultTest(base.EngineTestCase):
def setUp(self):
super(ErrorResultTest, self).setUp()
self.register_action_class('my_action', MyAction)
self.register_action_class('my_async_action', MyAsyncAction)
def test_error_result1(self):
wf_service.create_workflows(WF.format(action_name="my_action"))
# Start workflow.
wf_ex = self.engine.start_workflow(
'wf',
wf_input={
'success_result': None,
'error_result': 2
}
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
tasks = wf_ex.task_executions
self.assertEqual(2, len(tasks))
task1 = self._assert_single_item(tasks, name='task1')
task2 = self._assert_single_item(tasks, name='task2')
self.assertEqual(states.ERROR, task1.state)
self.assertEqual(states.SUCCESS, task2.state)
# "publish" clause is ignored in case of ERROR so task execution
# field must be empty.
self.assertDictEqual({}, task1.published)
self.assertEqual(2, data_flow.get_task_execution_result(task1))
def test_error_result2(self):
wf_service.create_workflows(WF.format(action_name="my_action"))
# Start workflow.
wf_ex = self.engine.start_workflow(
'wf',
wf_input={
'success_result': None,
'error_result': 3
}
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
tasks = wf_ex.task_executions
self.assertEqual(2, len(tasks))
task1 = self._assert_single_item(tasks, name='task1')
task3 = self._assert_single_item(tasks, name='task3')
self.assertEqual(states.ERROR, task1.state)
self.assertEqual(states.SUCCESS, task3.state)
# "publish" clause is ignored in case of ERROR so task execution
# field must be empty.
self.assertDictEqual({}, task1.published)
self.assertEqual(3, data_flow.get_task_execution_result(task1))
def test_success_result(self):
wf_service.create_workflows(WF.format(action_name="my_action"))
# Start workflow.
wf_ex = self.engine.start_workflow(
'wf',
wf_input={
'success_result': 'success',
'error_result': None
}
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
tasks = wf_ex.task_executions
self.assertEqual(1, len(tasks))
task1 = self._assert_single_item(tasks, name='task1')
self.assertEqual(states.SUCCESS, task1.state)
# "publish" clause is ignored in case of ERROR so task execution
# field must be empty.
self.assertDictEqual({'p_var': 'success'}, task1.published)
self.assertEqual(
'success',
data_flow.get_task_execution_result(task1)
)
def test_async_error_result(self):
wf_service.create_workflows(WF.format(action_name="my_async_action"))
# Start workflow.
wf_ex = self.engine.start_workflow(
'wf',
wf_input={
'success_result': None,
'error_result': 2
}
)
# If the action errors, we expect the workflow to continue. The
# on-error means the workflow ends in success.
self.await_workflow_success(wf_ex.id)
def test_async_success_result(self):
wf_service.create_workflows(WF.format(action_name="my_async_action"))
# Start workflow.
wf_ex = self.engine.start_workflow(
'wf',
wf_input={
'success_result': 'success',
'error_result': None
}
)
# When the action is successful, the workflow will wait in the RUNNING
# state for it to complete.
self.await_workflow_running(wf_ex.id)
with db_api.transaction():
# Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
tasks = wf_ex.task_executions
self.assertEqual(1, len(tasks))
task1 = self._assert_single_item(tasks, name='task1')
self.assertEqual(states.RUNNING, task1.state)
| apache-2.0 | -2,862,481,186,049,596,400 | 29.623894 | 78 | 0.594712 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Tasking/Mcl_Cmd_PasswordDump_Tasking.py | 1 | 2486 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_PasswordDump_Tasking.py
CMD_PW_TYPE_ALL = 0
CMD_PW_TYPE_PERMANENT = 1
CMD_PW_TYPE_CACHED = 2
CMD_PW_TYPE_DIGEST = 3
def TaskingMain(namespace):
import mcl.imports
import mcl.target
import mcl.tasking
import mcl.tasking.technique
from mcl.object.Message import MarshalMessage
mcl.imports.ImportWithNamespace(namespace, 'mca.survey.cmd.passworddump', globals())
mcl.imports.ImportWithNamespace(namespace, 'mca.survey.cmd.passworddump.tasking', globals())
lpParams = mcl.tasking.GetParameters()
tgtParams = mca.survey.cmd.passworddump.Params()
tgtParams.threadProvider = mcl.tasking.technique.Lookup('PASSWORDDUMP', mcl.tasking.technique.TECHNIQUE_MCL_INJECT, lpParams['thread'])
tgtParams.memoryProvider = mcl.tasking.technique.Lookup('PASSWORDDUMP', mcl.tasking.technique.TECHNIQUE_MCL_MEMORY, lpParams['memory'])
if lpParams['type'] == CMD_PW_TYPE_ALL:
tgtParams.type = mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_PERMANENT | mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_CACHED
elif lpParams['type'] == CMD_PW_TYPE_PERMANENT:
tgtParams.type = mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_PERMANENT
elif lpParams['type'] == CMD_PW_TYPE_CACHED:
tgtParams.type = mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_CACHED
elif lpParams['type'] == CMD_PW_TYPE_DIGEST:
tgtParams.type = mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_DIGEST
else:
mcl.tasking.OutputError('Invalid password type (%u)' % lpParams['type'])
return False
rpc = mca.survey.cmd.passworddump.tasking.RPC_INFO_DUMP
msg = MarshalMessage()
tgtParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
taskXml = mcl.tasking.Tasking()
taskXml.AddProvider(mcl.tasking.technique.TECHNIQUE_MCL_MEMORY, tgtParams.memoryProvider)
taskXml.AddProvider(mcl.tasking.technique.TECHNIQUE_MCL_INJECT, tgtParams.threadProvider)
mcl.tasking.OutputXml(taskXml.GetXmlObject())
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.survey.cmd.passworddump.errorStrings)
return False
return True
if __name__ == '__main__':
import sys
if TaskingMain(sys.argv[1]) != True:
sys.exit(-1) | unlicense | -3,902,764,518,018,893,000 | 45.924528 | 139 | 0.723652 | false |
orlenko/sfpirg | sfpirgapp/views/actiongroups.py | 1 | 5045 | from django.conf import settings
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http.response import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from mezzanine.utils.email import send_mail_template
from sfpirgapp.forms import ActionGroupForm
from sfpirgapp.models import ActionGroup, Settings
import logging
from sfpirgapp.forms import ActionGroupRequestForm
from django.shortcuts import resolve_url
from django.contrib.auth.decorators import login_required
from sfpirgapp.templatetags.sfpirg_tags import _category_by_model
log = logging.getLogger(__name__)
def aglist(request):
aglist = ActionGroup.objects.all().order_by('title')
paginator = Paginator(aglist, 10)
page = request.GET.get('page')
try:
aglist = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
aglist = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
aglist = paginator.page(paginator.num_pages)
context = RequestContext(request, locals())
return render_to_response('sfpirg/aglist.html', {}, context_instance=context)
def actiongroup(request, slug):
actiongroup = get_object_or_404(ActionGroup, slug=slug)
page = actiongroup
current_item = page.title
form = None
if 'edit' in request.REQUEST:
form = ActionGroupForm(request.POST or None, request.FILES or None, instance=actiongroup)
if request.method == 'POST' and form.is_valid():
form.save()
return HttpResponseRedirect(actiongroup.get_absolute_url())
else:
if not (request.user.is_superuser or request.user == actiongroup.user):
return HttpResponseRedirect(actiongroup.get_absolute_url())
context = RequestContext(request, locals())
return render_to_response('pages/actiongroup.html', {}, context_instance=context)
def request_group(request):
try:
form = ActionGroupRequestForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
actiongroup = form.instance
send_mail_template('Action Group Application Submitted: %s' % actiongroup.title,
'sfpirg/email/ag_application',
Settings.get_setting('SERVER_EMAIL'),
actiongroup.contact_email,
context=locals(),
attachments=None,
fail_silently=settings.DEBUG,
addr_bcc=None)
send_mail_template('Action Group Application Submitted: %s' % actiongroup.title,
'sfpirg/email/ag_admin_application',
Settings.get_setting('SERVER_EMAIL'),
Settings.get_setting('ACTION_GROUPS_ADMIN_EMAIL'),
context=locals(),
attachments=None,
fail_silently=settings.DEBUG,
addr_bcc=None)
return HttpResponseRedirect(resolve_url('thankyou'))
current_item = 'Action Group Request'
context = RequestContext(request, locals())
return render_to_response('sfpirg/action_group_request.html', {}, context_instance=context)
except:
log.error('Failed to process request', exc_info=1)
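# A minimal sketch of how these views might be wired up in a urls.py of the same
# era (Django 1.x-style `url()` patterns); the URL names and regexes below are
# illustrative assumptions, not taken from this project's actual configuration:
#
#     from django.conf.urls import url
#     from sfpirgapp.views import actiongroups
#
#     urlpatterns = [
#         url(r'^action-groups/$', actiongroups.aglist, name='aglist'),
#         url(r'^action-groups/request/$', actiongroups.request_group, name='request_group'),
#         url(r'^action-groups/create/$', actiongroups.create, name='create_actiongroup'),
#         url(r'^action-groups/(?P<slug>[\w-]+)/$', actiongroups.actiongroup, name='actiongroup'),
#     ]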
@login_required
def create(request):
user = request.user
# See if this user already has an actiongroup - no need to create then.
for existing in ActionGroup.objects.filter(user=user):
return HttpResponseRedirect(existing.get_absolute_url() + '?edit=1')
initial = {'user': user, 'status': 1, '_order': 0}
cat = _category_by_model(ActionGroup)
initial['category'] = cat
form = ActionGroupForm(request.POST or None, request.FILES or None, initial=initial)
if request.method == 'POST' and form.is_valid():
form.save()
actiongroup = form.instance
send_mail_template('Action Group Application Submitted: %s' % actiongroup.title,
'sfpirg/email/ag_application',
Settings.get_setting('SERVER_EMAIL'),
user.email,
context=locals(),
attachments=None,
fail_silently=settings.DEBUG,
addr_bcc=None)
send_mail_template('Action Group Application Submitted: %s' % actiongroup.title,
'sfpirg/email/ag_admin_application',
Settings.get_setting('SERVER_EMAIL'),
Settings.get_setting('ACTION_GROUPS_ADMIN_EMAIL'),
context=locals(),
attachments=None,
fail_silently=settings.DEBUG,
addr_bcc=None)
return HttpResponseRedirect(resolve_url('thankyou'))
current_item = 'Create Action Group'
context = RequestContext(request, locals())
return render_to_response('sfpirg/action_group_create.html', {}, context_instance=context)
| bsd-2-clause | -6,149,214,096,364,828,000 | 42.869565 | 99 | 0.649554 | false |
cshallue/models | research/differential_privacy/pate/smooth_sensitivity.py | 1 | 13739 | # Copyright 2017 The 'Scalable Private Learning with PATE' Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for smooth sensitivity analysis for PATE mechanisms.
This library implements functionality for doing smooth sensitivity analysis
for Gaussian Noise Max (GNMax), Threshold with Gaussian noise, and Gaussian
Noise with Smooth Sensitivity (GNSS) mechanisms.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
import numpy as np
import scipy.optimize
import scipy.special
import scipy.stats
import sympy as sp
import core as pate
################################
# SMOOTH SENSITIVITY FOR GNMAX #
################################
# Global dictionary for storing cached q0 values keyed by (sigma, order).
_logq0_cache = {}
def _compute_logq0(sigma, order):
key = (sigma, order)
if key in _logq0_cache:
return _logq0_cache[key]
logq0 = compute_logq0_gnmax(sigma, order)
_logq0_cache[key] = logq0 # Update the global variable.
return logq0
def _compute_logq1(sigma, order, num_classes):
logq0 = _compute_logq0(sigma, order) # Most likely already cached.
logq1 = math.log(_compute_bl_gnmax(math.exp(logq0), sigma, num_classes))
assert logq1 <= logq0
return logq1
def _compute_mu1_mu2_gnmax(sigma, logq):
# Computes mu1, mu2 according to Proposition 10.
mu2 = sigma * math.sqrt(-logq)
mu1 = mu2 + 1
return mu1, mu2
def _compute_data_dep_bound_gnmax(sigma, logq, order):
# Applies Theorem 6 in Appendix without checking that logq satisfies necessary
# constraints. The pre-conditions must be assured by comparing logq against
# logq0 by the caller.
variance = sigma**2
mu1, mu2 = _compute_mu1_mu2_gnmax(sigma, logq)
eps1 = mu1 / variance
eps2 = mu2 / variance
log1q = np.log1p(-math.exp(logq)) # log1q = log(1-q)
log_a = (order - 1) * (
log1q - (np.log1p(-math.exp((logq + eps2) * (1 - 1 / mu2)))))
log_b = (order - 1) * (eps1 - logq / (mu1 - 1))
return np.logaddexp(log1q + log_a, logq + log_b) / (order - 1)
def _compute_rdp_gnmax(sigma, logq, order):
logq0 = _compute_logq0(sigma, order)
if logq >= logq0:
return pate.rdp_data_independent_gaussian(sigma, order)
else:
return _compute_data_dep_bound_gnmax(sigma, logq, order)
def compute_logq0_gnmax(sigma, order):
"""Computes the point where we start using data-independent bounds.
Args:
sigma: std of the Gaussian noise
order: Renyi order lambda
Returns:
logq0: the point above which the data-ind bound overtakes data-dependent
bound.
"""
def _check_validity_conditions(logq):
# Function returns true iff logq is in the range where data-dependent bound
# is valid. (Theorem 6 in Appendix.)
mu1, mu2 = _compute_mu1_mu2_gnmax(sigma, logq)
if mu1 < order:
return False
eps2 = mu2 / sigma**2
# Do computation in the log space. The condition below comes from Lemma 9
# from Appendix.
return (logq <= (mu2 - 1) * eps2 - mu2 * math.log(mu1 / (mu1 - 1) * mu2 /
(mu2 - 1)))
def _compare_dep_vs_ind(logq):
return (_compute_data_dep_bound_gnmax(sigma, logq, order) -
pate.rdp_data_independent_gaussian(sigma, order))
# Natural upper bounds on q0.
logub = min(-(1 + 1. / sigma)**2, -((order - .99) / sigma)**2, -1 / sigma**2)
assert _check_validity_conditions(logub)
  # If the data-dependent bound is already better, we are done.
if _compare_dep_vs_ind(logub) < 0:
return logub
# Identifying a reasonable lower bound to bracket logq0.
loglb = 2 * logub # logub is negative, and thus loglb < logub.
while _compare_dep_vs_ind(loglb) > 0:
assert loglb > -10000, "The lower bound on q0 is way too low."
loglb *= 1.5
logq0, r = scipy.optimize.brentq(
_compare_dep_vs_ind, loglb, logub, full_output=True)
assert r.converged, "The root finding procedure failed to converge."
assert _check_validity_conditions(logq0) # just in case.
return logq0
def _compute_bl_gnmax(q, sigma, num_classes):
return ((num_classes - 1) / 2 * scipy.special.erfc(
1 / sigma + scipy.special.erfcinv(2 * q / (num_classes - 1))))
def _compute_bu_gnmax(q, sigma, num_classes):
return min(1, (num_classes - 1) / 2 * scipy.special.erfc(
-1 / sigma + scipy.special.erfcinv(2 * q / (num_classes - 1))))
def _compute_local_sens_gnmax(logq, sigma, num_classes, order):
"""Implements Algorithm 3 (computes an upper bound on local sensitivity).
(See Proposition 13 for proof of correctness.)
"""
logq0 = _compute_logq0(sigma, order)
logq1 = _compute_logq1(sigma, order, num_classes)
if logq1 <= logq <= logq0:
logq = logq1
beta = _compute_rdp_gnmax(sigma, logq, order)
beta_bu_q = _compute_rdp_gnmax(
sigma, math.log(_compute_bu_gnmax(math.exp(logq), sigma, num_classes)),
order)
beta_bl_q = _compute_rdp_gnmax(
sigma, math.log(_compute_bl_gnmax(math.exp(logq), sigma, num_classes)),
order)
return max(beta_bu_q - beta, beta - beta_bl_q)
def compute_local_sensitivity_bounds_gnmax(votes, num_teachers, sigma, order):
"""Computes a list of max-LS-at-distance-d for the GNMax mechanism.
A more efficient implementation of Algorithms 4 and 5 working in time
O(teachers*classes). A naive implementation is O(teachers^2*classes) or worse.
Args:
votes: A numpy array of votes.
num_teachers: Total number of voting teachers.
    sigma: Standard deviation of the Gaussian noise.
order: The Renyi order.
Returns:
A numpy array of local sensitivities at distances d, 0 <= d <= num_teachers.
"""
num_classes = len(votes) # Called m in the paper.
logq0 = _compute_logq0(sigma, order)
logq1 = _compute_logq1(sigma, order, num_classes)
logq = pate.compute_logq_gaussian(votes, sigma)
plateau = _compute_local_sens_gnmax(logq1, sigma, num_classes, order)
res = np.full(num_teachers, plateau)
if logq1 <= logq <= logq0:
return res
# Invariant: votes is sorted in the non-increasing order.
votes = sorted(votes, reverse=True)
res[0] = _compute_local_sens_gnmax(logq, sigma, num_classes, order)
curr_d = 0
go_left = logq > logq0 # Otherwise logq < logq1 and we go right.
# Iterate while the following is true:
# 1. If we are going left, logq is still larger than logq0 and we may still
# increase the gap between votes[0] and votes[1].
# 2. If we are going right, logq is still smaller than logq1.
while ((go_left and logq > logq0 and votes[1] > 0) or
(not go_left and logq < logq1)):
curr_d += 1
if go_left: # Try decreasing logq.
votes[0] += 1
votes[1] -= 1
idx = 1
# Restore the invariant. (Can be implemented more efficiently by keeping
# track of the range of indices equal to votes[1]. Does not seem to matter
# for the overall running time.)
while idx < len(votes) - 1 and votes[idx] < votes[idx + 1]:
votes[idx], votes[idx + 1] = votes[idx + 1], votes[idx]
idx += 1
else: # Go right, i.e., try increasing logq.
votes[0] -= 1
votes[1] += 1 # The invariant holds since otherwise logq >= logq1.
logq = pate.compute_logq_gaussian(votes, sigma)
res[curr_d] = _compute_local_sens_gnmax(logq, sigma, num_classes, order)
return res
##################################################
# SMOOTH SENSITIVITY FOR THE THRESHOLD MECHANISM #
##################################################
# A global dictionary of RDPs for various threshold values. Indexed by a 4-tuple
# (num_teachers, threshold, sigma, order).
_rdp_thresholds = {}
def _compute_rdp_list_threshold(num_teachers, threshold, sigma, order):
key = (num_teachers, threshold, sigma, order)
if key in _rdp_thresholds:
return _rdp_thresholds[key]
res = np.zeros(num_teachers + 1)
for v in range(0, num_teachers + 1):
logp = scipy.stats.norm.logsf(threshold - v, scale=sigma)
res[v] = pate.compute_rdp_threshold(logp, sigma, order)
_rdp_thresholds[key] = res
return res
def compute_local_sensitivity_bounds_threshold(counts, num_teachers, threshold,
sigma, order):
"""Computes a list of max-LS-at-distance-d for the threshold mechanism."""
def _compute_ls(v):
ls_step_up, ls_step_down = None, None
if v > 0:
ls_step_down = abs(rdp_list[v - 1] - rdp_list[v])
if v < num_teachers:
ls_step_up = abs(rdp_list[v + 1] - rdp_list[v])
    return max(v for v in (ls_step_down, ls_step_up) if v is not None)  # At least one is set.
cur_max = int(round(max(counts)))
rdp_list = _compute_rdp_list_threshold(num_teachers, threshold, sigma, order)
ls = np.zeros(num_teachers)
for d in range(max(cur_max, num_teachers - cur_max)):
ls_up, ls_down = None, None
if cur_max + d <= num_teachers:
ls_up = _compute_ls(cur_max + d)
if cur_max - d >= 0:
ls_down = _compute_ls(cur_max - d)
    ls[d] = max(v for v in (ls_up, ls_down) if v is not None)
return ls
#############################################
# PROCEDURES FOR SMOOTH SENSITIVITY RELEASE #
#############################################
# A global dictionary of exponentially decaying arrays. Indexed by beta.
dict_beta_discount = {}
def compute_discounted_max(beta, a):
n = len(a)
if beta not in dict_beta_discount or (len(dict_beta_discount[beta]) < n):
dict_beta_discount[beta] = np.exp(-beta * np.arange(n))
return max(a * dict_beta_discount[beta][:n])
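# Worked example (illustrative numbers only): with beta = 0.5 and
# a = [1.0, 0.8, 0.2], the discounted maximum is
#   max(1.0 * e^0, 0.8 * e^-0.5, 0.2 * e^-1.0) ~= max(1.0, 0.485, 0.074) = 1.0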
def compute_smooth_sensitivity_gnmax(beta, counts, num_teachers, sigma, order):
"""Computes smooth sensitivity of a single application of GNMax."""
  ls = compute_local_sensitivity_bounds_gnmax(counts, num_teachers, sigma,
                                              order)
return compute_discounted_max(beta, ls)
def compute_rdp_of_smooth_sensitivity_gaussian(beta, sigma, order):
"""Computes the RDP curve for the GNSS mechanism.
Implements Theorem 23 (https://arxiv.org/pdf/1802.08908.pdf).
"""
if beta > 0 and not 1 < order < 1 / (2 * beta):
raise ValueError("Order outside the (1, 1/(2*beta)) range.")
return order * math.exp(2 * beta) / sigma**2 + (
-.5 * math.log(1 - 2 * order * beta) + beta * order) / (
order - 1)
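# A minimal end-to-end sketch of how the pieces above combine for a GNSS-style
# release. The vote histogram, beta, sigma and order below are made-up values
# chosen only for illustration; they are not taken from any experiment.
def _example_gnss_smooth_sensitivity():
  votes = np.array([120, 30, 25, 25])  # one bin per class, 200 teachers total
  num_teachers = int(sum(votes))
  sigma, order, beta = 40., 10., 0.01
  ls = compute_local_sensitivity_bounds_gnmax(votes, num_teachers, sigma, order)
  ss = compute_discounted_max(beta, ls)
  rdp = compute_rdp_of_smooth_sensitivity_gaussian(beta, sigma, order)
  return ss, rdp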
def compute_params_for_ss_release(eps, delta):
"""Computes sigma for additive Gaussian noise scaled by smooth sensitivity.
Presently not used. (We proceed via RDP analysis.)
Compute beta, sigma for applying Lemma 2.6 (full version of Nissim et al.) via
Lemma 2.10.
"""
# Rather than applying Lemma 2.10 directly, which would give suboptimal alpha,
# (see http://www.cse.psu.edu/~ads22/pubs/NRS07/NRS07-full-draft-v1.pdf),
# we extract a sufficient condition on alpha from its proof.
#
# Let a = rho_(delta/2)(Z_1). Then solve for alpha such that
# 2 alpha a + alpha^2 = eps/2.
a = scipy.special.ndtri(1 - delta / 2)
alpha = math.sqrt(a**2 + eps / 2) - a
beta = eps / (2 * scipy.special.chdtri(1, delta / 2))
return alpha, beta
#######################################################
# SYMBOLIC-NUMERIC VERIFICATION OF CONDITIONS C5--C6. #
#######################################################
def _construct_symbolic_beta(q, sigma, order):
mu2 = sigma * sp.sqrt(sp.log(1 / q))
mu1 = mu2 + 1
eps1 = mu1 / sigma**2
eps2 = mu2 / sigma**2
a = (1 - q) / (1 - (q * sp.exp(eps2))**(1 - 1 / mu2))
b = sp.exp(eps1) / q**(1 / (mu1 - 1))
s = (1 - q) * a**(order - 1) + q * b**(order - 1)
return (1 / (order - 1)) * sp.log(s)
def _construct_symbolic_bu(q, sigma, m):
return (m - 1) / 2 * sp.erfc(sp.erfcinv(2 * q / (m - 1)) - 1 / sigma)
def _is_non_decreasing(fn, q, bounds):
"""Verifies whether the function is non-decreasing within a range.
Args:
fn: Symbolic function of a single variable.
q: The name of f's variable.
bounds: Pair of (lower_bound, upper_bound) reals.
Returns:
True iff the function is non-decreasing in the range.
"""
diff_fn = sp.diff(fn, q) # Symbolically compute the derivative.
diff_fn_lambdified = sp.lambdify(
q,
diff_fn,
modules=[
"numpy", {
"erfc": scipy.special.erfc,
"erfcinv": scipy.special.erfcinv
}
])
r = scipy.optimize.minimize_scalar(
diff_fn_lambdified, bounds=bounds, method="bounded")
assert r.success, "Minimizer failed to converge."
return r.fun >= 0 # Check whether the derivative is non-negative.
def check_conditions(sigma, m, order):
"""Checks conditions C5 and C6 (Section B.4.2 in Appendix)."""
q = sp.symbols("q", positive=True, real=True)
beta = _construct_symbolic_beta(q, sigma, order)
q0 = math.exp(compute_logq0_gnmax(sigma, order))
cond5 = _is_non_decreasing(beta, q, (0, q0))
if cond5:
bl_q0 = _compute_bl_gnmax(q0, sigma, m)
bu = _construct_symbolic_bu(q, sigma, m)
delta_beta = beta.subs(q, bu) - beta
cond6 = _is_non_decreasing(delta_beta, q, (0, bl_q0))
else:
cond6 = False # Skip the check, since Condition 5 is false already.
return (cond5, cond6)
def main(argv):
del argv # Unused.
if __name__ == "__main__":
app.run(main)
| apache-2.0 | -4,641,048,824,878,597,000 | 31.789976 | 87 | 0.635126 | false |
kevin-intel/scikit-learn | sklearn/feature_selection/_mutual_info.py | 3 | 16639 | # Author: Nikolay Mayorov <[email protected]>
# License: 3-clause BSD
import numpy as np
from scipy.sparse import issparse
from scipy.special import digamma
from ..metrics.cluster import mutual_info_score
from ..neighbors import NearestNeighbors, KDTree
from ..preprocessing import scale
from ..utils import check_random_state
from ..utils.fixes import _astype_copy_false
from ..utils.validation import check_array, check_X_y
from ..utils.multiclass import check_classification_targets
def _compute_mi_cc(x, y, n_neighbors):
"""Compute mutual information between two continuous variables.
Parameters
----------
x, y : ndarray, shape (n_samples,)
Samples of two continuous random variables, must have an identical
shape.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information. If it turned out to be negative it is
        replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
"""
n_samples = x.size
x = x.reshape((-1, 1))
y = y.reshape((-1, 1))
xy = np.hstack((x, y))
# Here we rely on NearestNeighbors to select the fastest algorithm.
nn = NearestNeighbors(metric='chebyshev', n_neighbors=n_neighbors)
nn.fit(xy)
radius = nn.kneighbors()[0]
radius = np.nextafter(radius[:, -1], 0)
# KDTree is explicitly fit to allow for the querying of number of
# neighbors within a specified radius
kd = KDTree(x, metric='chebyshev')
nx = kd.query_radius(x, radius, count_only=True, return_distance=False)
nx = np.array(nx) - 1.0
kd = KDTree(y, metric='chebyshev')
ny = kd.query_radius(y, radius, count_only=True, return_distance=False)
ny = np.array(ny) - 1.0
mi = (digamma(n_samples) + digamma(n_neighbors) -
np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1)))
return max(0, mi)
def _compute_mi_cd(c, d, n_neighbors):
"""Compute mutual information between continuous and discrete variables.
Parameters
----------
c : ndarray, shape (n_samples,)
Samples of a continuous random variable.
d : ndarray, shape (n_samples,)
Samples of a discrete random variable.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information. If it turned out to be negative it is
        replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
"""
n_samples = c.shape[0]
c = c.reshape((-1, 1))
radius = np.empty(n_samples)
label_counts = np.empty(n_samples)
k_all = np.empty(n_samples)
nn = NearestNeighbors()
for label in np.unique(d):
mask = d == label
count = np.sum(mask)
if count > 1:
k = min(n_neighbors, count - 1)
nn.set_params(n_neighbors=k)
nn.fit(c[mask])
r = nn.kneighbors()[0]
radius[mask] = np.nextafter(r[:, -1], 0)
k_all[mask] = k
label_counts[mask] = count
# Ignore points with unique labels.
mask = label_counts > 1
n_samples = np.sum(mask)
label_counts = label_counts[mask]
k_all = k_all[mask]
c = c[mask]
radius = radius[mask]
kd = KDTree(c)
m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)
m_all = np.array(m_all) - 1.0
mi = (digamma(n_samples) + np.mean(digamma(k_all)) -
np.mean(digamma(label_counts)) -
np.mean(digamma(m_all + 1)))
return max(0, mi)
def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):
"""Compute mutual information between two variables.
This is a simple wrapper which selects a proper function to call based on
whether `x` and `y` are discrete or not.
"""
if x_discrete and y_discrete:
return mutual_info_score(x, y)
elif x_discrete and not y_discrete:
return _compute_mi_cd(y, x, n_neighbors)
elif not x_discrete and y_discrete:
return _compute_mi_cd(x, y, n_neighbors)
else:
return _compute_mi_cc(x, y, n_neighbors)
def _iterate_columns(X, columns=None):
"""Iterate over columns of a matrix.
Parameters
----------
X : ndarray or csc_matrix, shape (n_samples, n_features)
Matrix over which to iterate.
columns : iterable or None, default=None
Indices of columns to iterate over. If None, iterate over all columns.
Yields
------
x : ndarray, shape (n_samples,)
Columns of `X` in dense format.
"""
if columns is None:
columns = range(X.shape[1])
if issparse(X):
for i in columns:
x = np.zeros(X.shape[0])
start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1]
x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]
yield x
else:
for i in columns:
yield X[:, i]
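# A small illustration of the helper above: iterating the columns of a dense
# array and of its CSC counterpart yields the same dense 1-D columns. The toy
# matrix here is an arbitrary example, not part of the library API.
def _demo_iterate_columns():
    from scipy.sparse import csc_matrix
    X = np.array([[0., 1.], [2., 0.]])
    dense_cols = [col.copy() for col in _iterate_columns(X)]
    sparse_cols = [col.copy() for col in _iterate_columns(csc_matrix(X))]
    return dense_cols, sparse_cols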
def _estimate_mi(X, y, discrete_features='auto', discrete_target=False,
n_neighbors=3, copy=True, random_state=None):
"""Estimate mutual information between the features and the target.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
discrete_target : bool, default=False
Whether to consider `y` as a discrete variable.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [1]_ and [2]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target.
A negative value will be replaced by 0.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [2] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
"""
X, y = check_X_y(X, y, accept_sparse='csc', y_numeric=not discrete_target)
n_samples, n_features = X.shape
if isinstance(discrete_features, (str, bool)):
if isinstance(discrete_features, str):
if discrete_features == 'auto':
discrete_features = issparse(X)
else:
raise ValueError("Invalid string value for discrete_features.")
discrete_mask = np.empty(n_features, dtype=bool)
discrete_mask.fill(discrete_features)
else:
discrete_features = check_array(discrete_features, ensure_2d=False)
if discrete_features.dtype != 'bool':
discrete_mask = np.zeros(n_features, dtype=bool)
discrete_mask[discrete_features] = True
else:
discrete_mask = discrete_features
continuous_mask = ~discrete_mask
if np.any(continuous_mask) and issparse(X):
raise ValueError("Sparse matrix `X` can't have continuous features.")
rng = check_random_state(random_state)
if np.any(continuous_mask):
if copy:
X = X.copy()
if not discrete_target:
X[:, continuous_mask] = scale(X[:, continuous_mask],
with_mean=False, copy=False)
# Add small noise to continuous features as advised in Kraskov et. al.
X = X.astype(float, **_astype_copy_false(X))
means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0))
X[:, continuous_mask] += 1e-10 * means * rng.randn(
n_samples, np.sum(continuous_mask))
if not discrete_target:
y = scale(y, with_mean=False)
y += 1e-10 * np.maximum(1, np.mean(np.abs(y))) * rng.randn(n_samples)
mi = [_compute_mi(x, y, discrete_feature, discrete_target, n_neighbors) for
x, discrete_feature in zip(_iterate_columns(X), discrete_mask)]
return np.array(mi)
def mutual_info_regression(X, y, *, discrete_features='auto', n_neighbors=3,
copy=True, random_state=None):
"""Estimate mutual information for a continuous target variable.
Mutual information (MI) [1]_ between two random variables is a non-negative
value, which measures the dependency between the variables. It is equal
to zero if and only if two random variables are independent, and higher
values mean higher dependency.
The function relies on nonparametric methods based on entropy estimation
from k-nearest neighbors distances as described in [2]_ and [3]_. Both
methods are based on the idea originally proposed in [4]_.
It can be used for univariate features selection, read more in the
:ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [2]_ and [3]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target.
Notes
-----
1. The term "discrete features" is used instead of naming them
"categorical", because it describes the essence more accurately.
For example, pixel intensities of an image are discrete features
       (but hardly categorical) and you will get better results if you mark
       them as such. Also note that treating a continuous variable as discrete
       and vice versa will usually give incorrect results, so be attentive
       about that.
2. True mutual information can't be negative. If its estimate turns out
to be negative, it is replaced by zero.
References
----------
.. [1] `Mutual Information
<https://en.wikipedia.org/wiki/Mutual_information>`_
on Wikipedia.
.. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [3] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
.. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
"""
return _estimate_mi(X, y, discrete_features, False, n_neighbors,
copy, random_state)
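# A minimal illustration of calling the estimator above on synthetic data; the
# shapes and the dependence of y on the first feature are assumptions made
# purely for this example, not part of the scikit-learn API.
def _demo_mutual_info_regression():
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(200, 3))
    y = X[:, 0] + 0.1 * rng.standard_normal(200)  # y depends only on feature 0
    # The first entry of the returned array should dominate the other two.
    return mutual_info_regression(X, y, random_state=0)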
def mutual_info_classif(X, y, *, discrete_features='auto', n_neighbors=3,
copy=True, random_state=None):
"""Estimate mutual information for a discrete target variable.
Mutual information (MI) [1]_ between two random variables is a non-negative
value, which measures the dependency between the variables. It is equal
to zero if and only if two random variables are independent, and higher
values mean higher dependency.
The function relies on nonparametric methods based on entropy estimation
from k-nearest neighbors distances as described in [2]_ and [3]_. Both
methods are based on the idea originally proposed in [4]_.
It can be used for univariate features selection, read more in the
:ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [2]_ and [3]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target.
Notes
-----
1. The term "discrete features" is used instead of naming them
"categorical", because it describes the essence more accurately.
For example, pixel intensities of an image are discrete features
       (but hardly categorical) and you will get better results if you mark
       them as such. Also note that treating a continuous variable as discrete
       and vice versa will usually give incorrect results, so be attentive
       about that.
2. True mutual information can't be negative. If its estimate turns out
to be negative, it is replaced by zero.
References
----------
.. [1] `Mutual Information
<https://en.wikipedia.org/wiki/Mutual_information>`_
on Wikipedia.
.. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [3] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
.. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
           of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
"""
check_classification_targets(y)
return _estimate_mi(X, y, discrete_features, True, n_neighbors,
copy, random_state)
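# A matching illustration for the discrete-target estimator; the synthetic data
# below is an assumption for demonstration only.
def _demo_mutual_info_classif():
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(200, 3))
    y = (X[:, 0] > 0.5).astype(int)  # labels derived from feature 0 only
    return mutual_info_classif(X, y, discrete_features=False, random_state=0)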
| bsd-3-clause | -3,846,150,620,233,349,600 | 36.307175 | 79 | 0.640603 | false |
Ingener74/Old-Star | video.py | 1 | 1386 | # encoding: utf8
from PySide.QtCore import Qt, QThread
from PySide.QtGui import QWidget, QApplication
import sys
from librtmp import RTMP, RTMPError
from res import (Ui_VideoWidget)
class StreamThread(QThread):
def __init__(self):
QThread.__init__(self)
def run(self):
try:
rtmp = RTMP(url='rtmp://127.0.0.1:1935/live/test')
print '1'
print rtmp
print '2'
print rtmp.connect()
print '3'
pkt = rtmp.read_packet()
print '4'
print pkt
print '5'
stream = rtmp.create_stream()
print '6'
print stream
# data = stream.read(1024)
except RTMPError, e:
print e
class Video(QWidget, Ui_VideoWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.setupUi(self)
self.controlButton.clicked.connect(self.onControlButton)
def onControlButton(self):
self.streamThread = StreamThread()
self.streamThread.start()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Escape:
self.close()
if __name__ == '__main__':
# noinspection PyCallByClass
QApplication.setStyle("plastique")
app = QApplication(sys.argv)
video = Video()
video.show()
sys.exit(app.exec_())
| lgpl-3.0 | 8,452,937,607,431,824,000 | 20.323077 | 64 | 0.560606 | false |
buret/pylmflib | pylmflib/morphology/component.py | 1 | 1531 | #! /usr/bin/env python
"""! @package morphology
"""
class Component():
def __init__(self, position=None, lexeme=None):
"""! @brief Constructor.
Component instances are owned by ListOfComponents.
@param position The position of the component in the multiword expression.
        @param lexeme The related lexeme; stored in the 'targets' attribute.
@return A Component instance.
"""
self.position = position
# Composed LexicalEntry lexeme
self.targets = lexeme
## Pointer to an existing LexicalEntry
# There is one LexicalEntry pointer by Component instance
self.__lexical_entry = None
def __del__(self):
"""! @brief Destructor.
"""
# Decrement the reference count on pointed objects
self.__lexical_entry = None
def set_lexical_entry(self, lexical_entry):
"""! @brief Set pointer to the component lexical entry instance. This function can only be called once the full dictionary has been parsed.
@param lexical_entry The component LexicalEntry.
@return Component instance.
"""
self.__lexical_entry = lexical_entry
return self
def get_lexical_entry(self):
"""! @brief Get pointed lexical entry.
@return Component private attribute '__lexical_entry'.
"""
return self.__lexical_entry
def get_lexeme(self):
"""! @brief Get component LexicalEntry lexeme.
@return Component attribute 'targets'.
"""
return self.targets
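# A minimal usage sketch (the lexical entry object below is hypothetical and only
# stands in for whatever entry the dictionary parser eventually resolves):
#
#     component = Component(position=1, lexeme="kick")
#     component.set_lexical_entry(some_lexical_entry)  # once parsing is complete
#     component.get_lexeme()  # -> "kick"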
| gpl-2.0 | -6,142,575,380,407,489,000 | 33.022222 | 147 | 0.625735 | false |
cbertinato/pandas | pandas/tests/indexes/timedeltas/test_indexing.py | 1 | 13473 | from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Timedelta, TimedeltaIndex, timedelta_range
import pandas.util.testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
assert result == Timedelta('1 day')
result = idx[0:5]
expected = timedelta_range('1 day', '5 day', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = timedelta_range('1 day', '9 day', freq='2D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = timedelta_range('12 day', '24 day', freq='3D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
@pytest.mark.parametrize('key', [pd.Timestamp('1970-01-01'),
pd.Timestamp('1970-01-02'),
datetime(1970, 1, 1)])
def test_timestamp_invalid_key(self, key):
# GH#20464
tdi = pd.timedelta_range(0, periods=10)
with pytest.raises(TypeError):
tdi.get_loc(key)
class TestWhere:
# placeholder for symmetry with DatetimeIndex and PeriodIndex tests
pass
class TestTake:
def test_take(self):
# GH 10295
idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
assert result == Timedelta('1 day')
result = idx.take([-1])
assert result == Timedelta('31 day')
result = idx.take([0, 1, 2])
expected = timedelta_range('1 day', '3 day', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = timedelta_range('1 day', '5 day', freq='2D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode='clip')
# TODO: This method came from test_timedelta; de-dup with version above
def test_take2(self):
tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']
idx = timedelta_range(start='1d', end='2d', freq='H', name='idx')
expected = TimedeltaIndex(tds, freq=None, name='idx')
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, TimedeltaIndex)
assert taken.freq is None
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = TimedeltaIndex(['1 days', '2 days', '3 days'],
name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = TimedeltaIndex(['2 days', '1 days', 'NaT'],
name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
class TestTimedeltaIndex:
def test_insert(self):
idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx')
tm.assert_index_equal(result, exp)
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
assert not isinstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02',
'1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01',
'1day 00:00:02', '1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
# GH 18295 (test missing)
expected = TimedeltaIndex(['1day', pd.NaT, '2day', '3day'])
for na in (np.nan, pd.NaT, None):
result = timedelta_range('1day', '3day').insert(1, na)
tm.assert_index_equal(result, expected)
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
        # preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D',
name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D',
name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(
['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in cases.items():
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
        # preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D',
name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d',
'7 d', '8 d', '9 d', '10d'],
freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in cases.items():
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
result = idx.delete(slice(n[0], n[-1] + 1))
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=Timedelta(0)) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=np.timedelta64(0, 's')) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=timedelta(0)) == 1
with pytest.raises(ValueError, match='unit abbreviation w/o a number'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
with pytest.raises(
ValueError,
match='tolerance size must match'):
idx.get_loc(idx[1], method='nearest',
tolerance=[Timedelta(0).to_timedelta64(),
Timedelta(0).to_timedelta64()])
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc('1 day 1 hour', method) == loc
# GH 16909
assert idx.get_loc(idx[1].to_timedelta64()) == 1
# GH 16896
assert idx.get_loc('0 days') == 0
def test_get_loc_nat(self):
tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00'])
assert tidx.get_loc(pd.NaT) == 1
assert tidx.get_loc(None) == 1
assert tidx.get_loc(float('nan')) == 1
assert tidx.get_loc(np.nan) == 1
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
res = idx.get_indexer(target, 'nearest',
tolerance=Timedelta('1 hour'))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
| bsd-3-clause | 4,502,410,124,735,251,000 | 38.860947 | 79 | 0.5036 | false |
cybertec-postgresql/pgwatch2 | pgwatch2/metrics/00_helpers/rollout_helper.py | 1 | 16948 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# auto-detects PG version, rolls out all helpers not in the exclude list, and reports an error summary. Runs as dry-run unless --confirm is given
# can only read monitoring DBs config from config DB or when specified per single DB / instance
import glob
import re
import psycopg2
import psycopg2.extras
import os
import argparse
import logging
import yaml
from pathlib import Path
args = None
def executeOnRemoteHost(sql, host, port, dbname, user, password='', sslmode='prefer', sslrootcert='', sslcert='', sslkey='', params=None, statement_timeout=None, quiet=False, target_schema=''):
result = []
conn = None
try:
# logging.debug('executing query on %s@%s/%s:', host, port, dbname)
# logging.debug(sql)
conn = psycopg2.connect(host=host, port=port, dbname=dbname, user=user, password=password,
sslmode=sslmode, sslrootcert=sslrootcert, sslcert=sslcert, sslkey=sslkey)
conn.autocommit = True
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
if statement_timeout:
cur.execute("SET statement_timeout TO '{}'".format(
statement_timeout))
if target_schema:
cur.execute("SET search_path TO {}".format(
target_schema))
cur.execute(sql, params)
if cur.statusmessage.startswith('SELECT') or cur.description:
result = cur.fetchall()
else:
result = [{'rows_affected': str(cur.rowcount)}]
except Exception as e:
if quiet:
return result, str(e)
else:
logging.exception('failed to execute "{}" on remote host "{}:{}"'.format(sql, host, port))
raise
finally:
if conn:
try:
conn.close()
except:
logging.exception('failed to close connection')
return result, None
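# A minimal usage sketch; the connection parameters below (localhost / postgres)
# are illustrative assumptions, not defaults taken from pgwatch2 itself.
def _example_execute_on_remote_host():
    rows, err = executeOnRemoteHost("select 1 as x", host='localhost', port=5432,
                                    dbname='postgres', user='postgres',
                                    statement_timeout='5s', quiet=True)
    if err:
        logging.warning('query failed: %s', err)
    return rows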
def get_pg_version_as_text(host, port, user, password=''):
sql = """select (regexp_matches(
regexp_replace(current_setting('server_version'), '(beta|devel).*', '', 'g'),
E'\\\\d+\\\\.?\\\\d+?')
)[1]::text as ver, not pg_is_in_recovery() as is_primary"""
logging.debug('getting PG version info from %s@%s...', host, port)
data, err = executeOnRemoteHost(sql, host, port, 'template1', user, password, quiet=True)
if err:
logging.debug('could not get PG version from %s@%s: %s', host, port, err)
return '', False
ver_full = data[0]['ver'] # will be in form of 10.11
is_primary = data[0]['is_primary']
s = ver_full.split('.')
if int(s[0]) >= 10:
return s[0], is_primary
else:
return s[0] + '.' + s[1], is_primary
# get real names if dbtype = 'postgres-continuous-discovery'
def resolve_configdb_host_to_dbs(md_entry):
ret = []
sql_all_enabled_dbs_on_instance = "select datname from pg_database where not datistemplate and datallowconn order by 1"
if md_entry['md_dbtype'] == 'postgres':
ret.append(md_entry)
elif md_entry['md_dbtype'] == 'postgres-continuous-discovery':
all_dbs, err = executeOnRemoteHost(sql_all_enabled_dbs_on_instance, md_entry['md_hostname'], md_entry['md_port'], 'template1', args.user, args.password, quiet=True)
if err:
logging.error('could not fetch DB listing from %s@%s: %s', md_entry['md_hostname'], md_entry['md_port'], err)
else:
for db in all_dbs:
e = md_entry.copy()
e['md_dbname'] = db['datname']
e['md_dbtype'] = 'postgres'
ret.append(e)
return ret
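# For reference, a monitored-DB ("md") entry is a dict carrying at least the keys
# used throughout this script; the values below are illustrative only:
#
#     {'md_unique_name': 'mydb', 'md_hostname': 'localhost', 'md_port': 5432,
#      'md_dbname': 'postgres', 'md_user': 'pgwatch2', 'md_password': '',
#      'md_sslmode': 'prefer', 'md_dbtype': 'postgres'}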
def get_active_dbs_from_configdb():
ret = []
sql = """select md_unique_name, md_hostname, md_port, md_dbname, md_user, md_password, md_sslmode, md_dbtype from pgwatch2.monitored_db where md_is_enabled and md_dbtype in ('postgres') order by 1"""
md_entries, err = executeOnRemoteHost(sql, args.configdb_host, args.configdb_port, args.configdb_dbname, args.configdb_user, args.configdb_password)
if err:
logging.fatal('could not connect to configDB: %s', err)
exit(1)
for md in md_entries:
logging.debug('found entry from config DB: hostname=%s, port=%s, dbname=%s, dbtype=%s, user=%s', md['md_hostname'], md['md_port'], md['md_dbname'], md['md_dbtype'], md['md_user'])
[ret.append(e) for e in resolve_configdb_host_to_dbs(md)]
return ret
def get_helper_sqls_from_configdb(pgver): # TODO caching
sql = """select distinct on (m_name) m_name as helper, m_sql as sql from metric where m_is_helper and m_is_active and m_pg_version_from <= %s order by m_name, m_pg_version_from desc;"""
helpers, err = executeOnRemoteHost(sql, args.configdb_host, args.configdb_port, args.configdb_dbname, args.configdb_user, args.configdb_password, params=(pgver,))
if err:
logging.fatal('could not connect to configDB: %s', err)
exit(1)
return helpers
def do_roll_out(md, pgver):
ok = 0
total = 0
if args.metrics_path:
helpers = get_helpers_from_filesystem(pgver)
else:
helpers = get_helper_sqls_from_configdb(pgver)
if args.helpers or args.excluded_helpers: # filter out unwanted helpers
helpers_filtered = []
if args.helpers:
wanted_helpers = args.helpers.split(',')
[helpers_filtered.append(h) for h in helpers if h['helper'] in wanted_helpers]
else:
unwanted_helpers = args.excluded_helpers.split(',')
[helpers_filtered.append(h) for h in helpers if h['helper'] not in unwanted_helpers]
helpers = helpers_filtered
for hp in helpers:
sql = hp['sql']
if args.monitoring_user != 'pgwatch2': # change helper definitions so that 'grant execute' is done for the monitoring role specified in the configuration
sql = re.sub(r'(?i)TO\s+pgwatch2', 'TO ' + args.monitoring_user, sql)
if args.python2:
sql = sql.replace('plpython3u', 'plpythonu')
all_dbs, err = executeOnRemoteHost(sql, md['md_hostname'], md['md_port'], md['md_dbname'], args.user, args.password, quiet=True, target_schema=args.target_schema)
if err:
logging.debug('failed to roll out %s: %s', hp['helper'], err)
else:
ok += 1
logging.debug('rollout of %s succeeded', hp['helper'])
total += 1
return ok, total
def get_helpers_from_filesystem(target_pgver):
ret = [] # [{'helper': 'get_x', 'sql': 'create ...',...}
target_pgver = float(target_pgver)
helpers = glob.glob(os.path.join(args.metrics_path, '*'))
for h in helpers:
if not os.path.isdir(h):
continue
vers = os.listdir(h)
numeric_vers = []
for v in vers:
try:
v_float = float(v)
except:
continue
if v_float >= 10 and h.endswith(".0"):
h = h.replace(".0", "")
numeric_vers.append((v, v_float))
if len(numeric_vers) == 0:
continue
numeric_vers.sort(key=lambda x: x[1])
best_matching_pgver = None
for nv, nv_float in numeric_vers:
if target_pgver >= nv_float:
best_matching_pgver = nv
if not best_matching_pgver:
logging.warning('could not find suitable helper for %s target ver %s, skipping', h, target_pgver)
continue
# logging.warning('found suitable helper for %s target ver %s', h, best_matching_pgver)
with open(os.path.join(h, str(best_matching_pgver), 'metric.sql'), 'r') as f:
sql = f.read()
ret.append({'helper': Path(h).stem, 'sql': sql})
ret.sort(key=lambda x: x['helper'])
return ret
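# The folder convention assumed by get_helpers_from_filesystem() is the usual
# pgwatch2 metrics layout, e.g. (helper name and version folders are examples):
#
#     <metrics-path>/get_stat_statements/9.4/metric.sql
#     <metrics-path>/get_stat_statements/10/metric.sql
#
# where the highest version folder not exceeding the target PG version is used.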
# TODO handle libpq_conn_str
def get_monitored_dbs_from_yaml_config(): # active entries ("is_enabled": true) only. configs can be in subfolders also - all YAML/YML files will be searched for
ret = []
for root, dirs, files in os.walk(args.config_path):
for f in files:
if f.lower().endswith('.yml') or f.lower().endswith('.yaml'):
logging.debug('found a config file: %s', os.path.join(root, f))
with open(os.path.join(root, f), 'r') as fp:
config = fp.read()
try:
                    monitored_dbs = yaml.safe_load(config)
except:
logging.error("skipping config file %s as could not parse YAML")
continue
if not monitored_dbs or not type(monitored_dbs) == list:
continue
for db in monitored_dbs:
if db.get('is_enabled'):
md = {'md_hostname': db.get('host'), 'md_port': db.get('port', 5432), 'md_dbname': db.get('dbname'),
'md_user': db.get('user'), 'md_password': db.get('password'),
'md_unique_name': db.get('unique_name'),
'md_dbtype': db.get('dbtype')}
[ret.append(e) for e in resolve_configdb_host_to_dbs(md)]
ret.sort(key=lambda x: x['md_unique_name'])
return ret
def main():
argp = argparse.ArgumentParser(description='Roll out pgwatch2 metric fetching helpers to all monitored DB-s configured in config DB or to a specified DB / instance (all DBs)')
# to use file based helper / config definitions
argp.add_argument('--metrics-path', dest='metrics_path', default='.', help='Path to the folder containing helper definitions. Current working directory by default')
argp.add_argument('--config-path', dest='config_path', default='', help='Path including YAML based monitoring config files. Subfolders are supported the same as with collector')
# pgwatch2 config db connect info
argp.add_argument('--configdb-host', dest='configdb_host', default='', help='pgwatch2 config DB host address')
argp.add_argument('--configdb-dbname', dest='configdb_dbname', default='pgwatch2', help='pgwatch2 config DB dbname (relevant in configdb mode)')
argp.add_argument('--configdb-port', dest='configdb_port', default='5432', help='pgwatch2 config DB port (relevant in configdb mode)')
argp.add_argument('--configdb-user', dest='configdb_user', default='postgres', help='pgwatch2 config DB user (relevant in configdb mode)')
argp.add_argument('--configdb-password', dest='configdb_password', default='', help='pgwatch2 config DB password (relevant in configdb mode)')
# rollout target db connect info
argp.add_argument('--host', dest='host', help='Host address for explicit single DB / instance rollout')
argp.add_argument('--port', dest='port', default=5432, type=int, help='Port for explicit single DB / instance rollout')
argp.add_argument('--dbname', dest='dbname', help='Explicit dbname for rollout')
argp.add_argument('-U', '--user', dest='user', help='Superuser username for helper function creation')
argp.add_argument('--password', dest='password', default='', help='Superuser password for helper function creation. The .pgpass file can also be used instead')
argp.add_argument('--monitoring-user', dest='monitoring_user', default='pgwatch2', help='The user getting execute privileges to created helpers (relevant for single or instance mode)')
argp.add_argument('--target-schema', dest='target_schema', default='', help='If specified, used to set the search_path')
argp.add_argument('-c', '--confirm', dest='confirm', action='store_true', default=False, help='perform the actual rollout')
argp.add_argument('-m', '--mode', dest='mode', default='', help='[configdb-all|yaml-all|single-db|single-instance]')
argp.add_argument('--helpers', dest='helpers', help='Roll out only listed (comma separated) helpers. By default all will be tried to roll out')
argp.add_argument('--excluded-helpers', dest='excluded_helpers', default='get_load_average_windows,get_load_average_copy,get_smart_health_per_device', help='Do not try to roll out these by default. Clear list if needed')
    argp.add_argument('--template1', dest='template1', action='store_true', default=False, help='Install helpers into template1 so that all newly created DBs will get them automatically')
argp.add_argument('--python2', dest='python2', action='store_true', default=False, help='Use Python v2 (EOL) instead of default v3 in PL/Python helpers')
argp.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='More chat')
rollout_dbs = []
unique_host_port_pairs = set()
global args
args = argp.parse_args()
logging.basicConfig(format='%(message)s', level=(logging.DEBUG if args.verbose else logging.WARNING))
if not args.mode or not args.mode.lower() in ['configdb-all', 'yaml-all', 'single-db', 'single-instance']:
        logging.fatal('invalid --mode param value "%s". must be one of: [configdb-all|yaml-all|single-db|single-instance]', args.mode)
logging.fatal(' configdb-all - roll out helpers to all active DBs defined in pgwatch2 config DB')
logging.fatal(' yaml-all - roll out helpers to all active DBs defined in YAML configs')
logging.fatal(' single-db - roll out helpers on a single DB specified by --host, --port (5432*), --dbname and --user params')
logging.fatal(' single-instance - roll out helpers on all DB-s of an instance specified by --host, --port (5432*) and --user params')
exit(1)
    if args.mode.lower() == 'configdb-all' and not args.configdb_host:
        logging.fatal('--configdb-host parameter required in configdb-all mode')
exit(1)
if args.mode.lower() == 'yaml-all' and not args.config_path:
logging.fatal('--config-path parameter (YAML definitions on monitored instances) required for \'yaml-all\' mode')
exit(1)
if not args.configdb_host and not args.metrics_path:
logging.fatal('one of --configdb-host or --metrics-path needs to be always specified')
exit(1)
if args.mode == 'single-db' and not (args.host and args.user and args.dbname):
logging.fatal('--host, --dbname, --user must be specified for explicit single DB rollout')
exit(1)
if args.mode == 'single-instance' and not (args.host and args.user):
logging.fatal('--host and --user must be specified for explicit single instance rollout')
exit(1)
if not args.user:
args.user = os.getenv('PGUSER') or os.getenv('USER')
logging.debug(args)
if not args.confirm:
logging.warning('starting in DRY-RUN mode, add --confirm to execute')
if args.mode == 'configdb-all':
rollout_dbs = get_active_dbs_from_configdb()
elif args.mode == 'yaml-all':
rollout_dbs = get_monitored_dbs_from_yaml_config()
else:
md = {'md_hostname': args.host, 'md_port': args.port, 'md_dbname': args.dbname, 'md_user': args.user, 'md_password': args.password,
'md_unique_name': 'ad-hoc', 'md_dbtype': 'postgres-continuous-discovery' if args.mode == 'single-instance' else 'postgres'}
if args.mode == 'single-instance':
rollout_dbs = resolve_configdb_host_to_dbs(md)
else: # single DB
rollout_dbs = [md]
logging.warning('*** ROLLOUT TO TARGET DB-s ***')
for i, db in enumerate(rollout_dbs):
pgver, is_primary = get_pg_version_as_text(db['md_hostname'], db['md_port'], args.user, args.password)
if pgver == '':
logging.error('DB #%s: [%s] failed to determine pg version for %s@%s, skipping rollout', i, db['md_unique_name'], db['md_hostname'], db['md_port'])
continue
if not is_primary:
logging.info('DB #%s: [%s] %s@%s skipping as not a primary', i, db['md_unique_name'], db['md_hostname'], db['md_port'])
continue
logging.warning('DB #%s: [%s] %s@%s/%s on version %s', i, db['md_unique_name'], db['md_hostname'], db['md_port'], db['md_dbname'], pgver)
if args.confirm:
ok, total = do_roll_out(db, pgver)
logging.warning('%s / %s succeeded', ok, total)
if args.template1 and (db['md_hostname'], db['md_port']) not in unique_host_port_pairs: # try template1 rollout only once per instance
db_t1 = db.copy()
db_t1['md_dbname'] = 'template1'
logging.warning('DB #%s TEMPLATE1: [%s] %s@%s/%s on version %s', i, db['md_unique_name'], db['md_hostname'],
db['md_port'], db_t1['md_dbname'], pgver)
ok_t1, total_t1 = do_roll_out(db_t1, pgver)
ok += ok_t1
total += total_t1
logging.warning('%s / %s succeeded', ok_t1, total_t1)
unique_host_port_pairs.add((db['md_hostname'], db['md_port']))
logging.info('done')
if __name__ == '__main__':
main()
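# Illustrative invocations (a sketch only; the script name and exact flag
# spellings are assumptions inferred from the argparse attributes used above):
#   ./rollout.py --mode yaml-all --config-path /etc/pw2/instances.yaml --metrics-path /etc/pw2/metrics --confirm
#   ./rollout.py --mode single-db --host db1 --dbname prod --user postgres --confirm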
| bsd-3-clause | -8,282,996,437,867,820,000 | 49.142012 | 224 | 0.615294 | false |
Grumpy-Mike/Mikes-Pi-Bakery | Tap-A-LED_part2/software/Sequencer/sequencer1.py | 1 | 3282 | #!/usr/bin/env python3
# Sequencer1 by Mike Cook August 2020
#Plays samples from a file
import time
from pygame import mixer
import board
import neopixel
from caltap import CalTap
def main():
global markTime, stepTime
init()
print("Sequencer - playing samples through the audio output")
print("Tap to add or remove samples")
t = float(input("Please enter the speed in BPM "))
stepTime = 1/((t/60)*4) # assume 4 beats in a bar
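    # Worked example (illustrative): at 120 BPM this gives
    # stepTime = 1 / ((120/60) * 4) = 1/8 = 0.125 s per 16th-note step.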
tapCount = 0
beingTouched = False
while 1:
if time.time() - markTime >= stepTime :
markTime = time.time()
nextStep()
if tap.touched() and not beingTouched:
pos = tap.getPos()
if pos[3] : # a valid reading
if pixels[pos[2]] != [0, 0, 0]:
pixels[pos[2]] = (0, 0, 0) # turn off
else:
pixels[pos[2]] = colours[pos[1]]
tapCount += 1
if tapCount >= len(colours) : tapCount = 0
beingTouched = True
pixels.show()
else :
if not tap.touched() : beingTouched = False
def init():
global colours, tap, pixels, posScan , stepTime, markTime
global colBuffer, sounds
# put your own colours here
colours = [(255, 0, 0), (255, 72, 0), (255, 145, 0),
(255, 218, 0), (218, 255, 0), (145, 255, 0),
(72, 255, 0), (0, 255, 0), (255,255,255) ]
tap = CalTap()
pixel_pin = board.D18
num_pixels = 128
# RGB or GRB. Some NeoPixels have red and green reversed
ORDER = neopixel.GRB
BRIGHTNESS = 0.1 # 0.6 is maximum brightness for 3A external supply
pixels = neopixel.NeoPixel(pixel_pin, num_pixels,
brightness = BRIGHTNESS, auto_write = False,
pixel_order = ORDER)
pixels.fill((0, 0, 0))
posScan = 0 ; stepTime = 0.3 ; markTime = time.time()
colBuffer = [(0,0,0)] * 8
mixer.pre_init(44100, -16, 12, 512)
mixer.init()
# change these to other sample names
soundNames = ["0", "1",
"2", "3",
"4", "5",
"6", "7" ]
# change Marimba to another directory containing your samples
sounds = [ mixer.Sound("Marimba/"+
soundNames[i]+".wav")
for i in range(0,len(soundNames))]
mixer.set_num_channels(16)
def nextStep():
global posScan
putCol(posScan)
posScan +=1
if posScan > 15 : posScan = 0
getCol(posScan)
for i in range(8):
pixels[i + posScan * 8] = dimCol(i)
pixels.show()
def dimCol(i):
thresh = 40
r = colBuffer[i][0]
g = colBuffer[i][1]
b = colBuffer[i][2]
if r > thresh :
r -= thresh
else: r += thresh
if g > thresh :
g -= thresh
else: g += thresh
if b > thresh :
b -= thresh
else: b += thresh
return ( r, g, b )
def putCol(pos): # restore old column of colours
for i in range(8):
pixels[i + pos * 8] = colBuffer[i]
def getCol(pos):
for i in range(8):
colBuffer[i] = pixels[i + pos * 8]
#print(colBuffer[i])
if (colBuffer[i] != [0, 0, 0]):
sounds[i].play()
# Main program logic:
if __name__ == '__main__':
main()
| gpl-2.0 | -6,181,341,447,900,539,000 | 29.110092 | 71 | 0.532297 | false |
Altair3/Tanks | bzagents/uberagent/ObstacleList.py | 1 | 3631 | from OccGrid import OccGrid
from geo import Point,Line
class ObstacleList(object):
def __init__(self, occgrid):
self.occgrid = occgrid
self.yMax = occgrid.yMax
self.yMin = self.yMax * -1
self.xMax = occgrid.xMax
self.xMin = self.xMax * -1
self.daList = []
self.threshold = .6
self.neighborCheckNumber = 1
def getObstaclePoints(self):
return self.daList
def removeCornersInBlock(self, x, y, length):
for p in self.daList:
pX = p.x
pY = p.y
if (pX <= (x+length)) and (pX >= x):
if (pY <= (y+length)) and (pY >= y):
self.daList.remove(p)
def scanGrid(self, startX, startY, length):
length = 100
self.removeCornersInBlock(startX, startY, length)
for x in range(startX, (startX+length+1)):
for y in range(startY, (startY+length+1)):
if (x < self.xMin) or (x > self.xMax) or (y < self.yMin) or (y > self.yMax):
continue
#print "Scanning:", "(" + str(x) + "," + str(y) + ")"
if self.isCorner(x,y):
self.daList.append(Point(x,y))
def isCorner(self, x, y):
if self.occgrid.get(x, y) >= self.threshold:
up = self.checkUp(x,y)
down = self.checkDown(x,y)
left = self.checkLeft(x,y)
right = self.checkRight(x,y)
if (up and left):
if ((not down) and (not right)):
return True
else:
return False
if (up and right):
if ((not down) and (not left)):
return True
else:
return False
if (down and left):
if ((not up) and (not right)):
return True
else:
return False
if (down and right):
if ((not up) and (not left)):
return True
else:
return False
return False
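    # Illustrative reading: a cell at or above `threshold` is reported as a
    # corner exactly when one vertical neighbour (up or down) and one
    # horizontal neighbour (left or right) are occupied while the two
    # opposite neighbours are free.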
def checkUp(self, x, y):
number = 0
for i in range(1, self.neighborCheckNumber+1):
if (y + i) <= self.yMax:
prob = self.occgrid.get(x, (y+i))
if prob < self.threshold:
return False
return True
def checkDown(self, x, y):
for i in range(self.neighborCheckNumber, 0, -1):
if (y - i) >= self.yMin:
prob = self.occgrid.get(x, (y-i))
if prob < self.threshold:
return False
return True
def checkRight(self, x, y):
for i in range(1, self.neighborCheckNumber+1):
if (x + i) <= self.xMax:
prob = self.occgrid.get((x+i), y)
if prob < self.threshold:
return False
return True
def checkLeft(self, x, y):
for i in range(self.neighborCheckNumber, 0, -1):
if (x - i) >= self.xMin:
prob = self.occgrid.get((x-i), y)
if prob < self.threshold:
return False
return True
| gpl-3.0 | 5,781,750,627,482,786,000 | 29.258333 | 92 | 0.41366 | false |
caiorss/vboxcontrol | commandClient.py | 1 | 4394 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from subprocess import PIPE, Popen
import platform
import os
import sys
import re
import zlib
from socket import socket
from socket import AF_INET, SOCK_STREAM, SHUT_RDWR
from socket import SOL_SOCKET, SO_REUSEADDR
localhost = '127.0.0.1'
allhosts = '0.0.0.0'
VERSION = '0.1'  # client version string (assumed value) reported in connect_wait()
import logging
import logging.config
LOG_SETTINGS = {
# --------- GENERAL OPTIONS ---------#
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'NOTSET',
'handlers': ['file'],
},
#---------- HANDLERS ---------------#
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'NOTSET',
'formatter': 'detailed',
'stream': 'ext://sys.stdout',
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'NOTSET',
'formatter': 'detailed',
'filename': 'client.log',
'mode': 'a',
'maxBytes': 10485760,
'backupCount': 5,
},
'tcp' : {
'class' : 'logging.handlers.SocketHandler',
'level' : 'INFO',
'host' : '192.168.1.2',
'port' : 9020,
'formatter': 'detailed',
},
},
# ----- FORMATTERS -----------------#
'formatters': {
'detailed': {
'format': '%(asctime)s %(module)-17s line:%(lineno)-4d %(funcName)s() ' \
'%(levelname)-8s %(message)s',
},
'verbose': {
'format': '%(levelname)-8s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt': '%a, %d %b %Y %H:%M:%S'
},
'email': {
'format': 'Timestamp: %(asctime)s\nModule: %(module)s\n' \
'Line: %(lineno)d\nMessage: %(message)s',
},
},
}
logging.config.dictConfig(LOG_SETTINGS)
logger = logging.getLogger('root')
class Client(object):
"""
    Stream socket client class -- no definitive name
"""
def __init__(self, host, port, buffersize=1024):
self.host = host
self.port = port
self.buffersize = 1024
# Clients IP's connected to this server
self.clients = []
# Client Sockets List
self.connst = []
self.sock = None
self.mode = "shell"
def connect(self):
"""
        Try to connect to the server once; return True if the
        connection succeeds, False otherwise.
"""
# create socket handler
s = socket(AF_INET, SOCK_STREAM)
self.sock = s
try:
self.sock.connect((self.host, self.port))
return True
except:
return False
def connect_wait(self):
"""
        Keep trying to connect to the server forever,
        even if the server is down.
"""
s = socket(AF_INET, SOCK_STREAM)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sock = s
logger.info("Client waiting server connection")
while True:
try:
self.sock.connect((self.host, self.port))
self.sendc("Client v%s Started from %s - %s " % ( VERSION, os.getcwd(), platform.platform() ))
break
except:
pass
logger.info("Client connected to server OK")
def sendc(self, msg):
"""
        Send a flow-control message to the server.
"""
self.sock.sendall(msg)
def recevc(self):
"""
        Receive a control message from the server.
"""
logger.info("wainting token")
while True:
data = self.sock.recv(self.buffersize)
#print data
if not data:
continue
else:
# logger.debug("len(data) =%s" % len(data))
# data2 = zlib.decompress(data)
# logger.debug("len(data2) =%s" % len(data2))
return data
def handling_connections(self):
pass
def send(self):
pass
class CommandClient(Client):
def __init__(self, host, port, buffersize):
super(CommandClient, self).__init__(host=host, port=port, buffersize=buffersize)
c = CommandClient(host='localhost', port=9090, buffersize=1024)
c.connect()
c.sendc("Hello world server") | unlicense | 8,847,994,244,525,383,000 | 23.016393 | 110 | 0.506827 | false |
civisanalytics/civis-python | civis/service_client.py | 1 | 8958 | from collections import OrderedDict
from functools import lru_cache
import json
from jsonref import JsonRef
import re
import requests
import warnings
from civis import APIClient
from civis.base import CivisAPIError, Endpoint, tostr_urljoin
from civis.resources._resources import parse_method
from civis._utils import to_camelcase
def _get_service(client):
if client._api_key:
api_client = APIClient(client._api_key)
else:
api_client = APIClient()
service = api_client.services.get(client._service_id)
return service
def auth_service_session(session, client):
service = _get_service(client)
auth_url = service['current_deployment']['displayUrl']
# Make request for adding Authentication Cookie to session
session.get(auth_url)
def _parse_service_path(path, operations, root_path=None):
""" Parse an endpoint into a class where each valid http request
on that endpoint is converted into a convenience function and
attached to the class as a method.
"""
if root_path is not None:
path = path.replace(root_path, '')
path = path.strip('/')
modified_base_path = re.sub("-", "_", path.split('/')[0].lower())
methods = []
for verb, op in operations.items():
method = parse_method(verb, op, path)
if method is None:
continue
methods.append(method)
return modified_base_path, methods
def parse_service_api_spec(api_spec, root_path=None):
"""Dynamically create classes to interface with a Civis Service API.
Parse an OpenAPI (Swagger) specification into a dictionary of classes
where each class represents an endpoint resource and contains
methods to make http requests on that resource.
Parameters
----------
api_spec : OrderedDict
The Civis Service API specification to parse. References should be
resolved before passing, typically using jsonref.JsonRef().
root_path : str, optional
An additional path for APIs that are not hosted on the service's
root level. An example root_path would be '/api' for an app with
resource endpoints that all begin with '/api'.
"""
paths = api_spec['paths']
classes = {}
for path, ops in paths.items():
base_path, methods = _parse_service_path(
path, ops, root_path=root_path)
class_name = to_camelcase(base_path)
if methods and classes.get(base_path) is None:
classes[base_path] = type(str(class_name),
(ServiceEndpoint,),
{})
for method_name, method in methods:
setattr(classes[base_path], method_name, method)
return classes
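# Illustrative usage of parse_service_api_spec (the file name and resource name
# are hypothetical):
#   with open('service_openapi.json') as f:
#       raw_spec = json.load(f, object_pairs_hook=OrderedDict)
#   spec = JsonRef.replace_refs(raw_spec)
#   classes = parse_service_api_spec(spec, root_path='/api')
#   # classes maps each top-level resource path (e.g. 'widgets') to a
#   # ServiceEndpoint subclass whose methods wrap its HTTP operations.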
class ServiceEndpoint(Endpoint):
def __init__(self, client,
return_type='civis'):
self._return_type = return_type
self._client = client
def _build_path(self, path):
if not path:
return self._client._base_url
if not self._client._root_path:
return tostr_urljoin(self._client._base_url, path.strip("/"))
return tostr_urljoin(self._client._base_url,
self._client._root_path.strip("/"),
path.strip("/"))
def _make_request(self, method, path=None, params=None, data=None,
**kwargs):
url = self._build_path(path)
with requests.Session() as sess:
auth_service_session(sess, self._client)
with self._lock:
response = sess.request(method, url, json=data,
params=params, **kwargs)
if not response.ok:
raise CivisAPIError(response)
return response
class ServiceClient():
def __init__(self, service_id, root_path=None,
swagger_path="/endpoints", api_key=None,
return_type='snake', local_api_spec=None):
"""Create an API Client from a Civis service.
Parameters
----------
service_id : str, required
The Id for the service that will be used to generate the client.
root_path : str, optional
An additional path for APIs that are not hosted on the service's
root level. An example root_path would be '/api' for an app with
resource endpoints that all begin with '/api'.
swagger_path : str, optional
The endpoint path that will be used to download the API Spec.
The default value is '/endpoints' but another common path
might be '/spec'. The API Spec must be compliant with Swagger
2.0 standards.
api_key : str, optional
Your API key obtained from the Civis Platform. If not given, the
client will use the :envvar:`CIVIS_API_KEY` environment variable.
This API key will need to be authorized to access the service
used for the client.
return_type : str, optional
The following types are implemented:
- ``'raw'`` Returns the raw :class:`requests:requests.Response`
object.
- ``'snake'`` Returns a :class:`civis.response.Response` object
for the json-encoded content of a response. This maps the
top-level json keys to snake_case.
- ``'pandas'`` Returns a :class:`pandas:pandas.DataFrame` for
list-like responses and a :class:`pandas:pandas.Series` for
single a json response.
local_api_spec : collections.OrderedDict or string, optional
The methods on this class are dynamically built from the Service
API specification, which can be retrieved from the /endpoints
endpoint. When local_api_spec is None, the default, this
specification is downloaded the first time APIClient is
instantiated. Alternatively, a local cache of the specification
may be passed as either an OrderedDict or a filename which
points to a json file.
"""
if return_type not in ['snake', 'raw', 'pandas']:
raise ValueError("Return type must be one of 'snake', 'raw', "
"'pandas'")
self._api_key = api_key
self._service_id = service_id
self._base_url = self.get_base_url()
self._root_path = root_path
self._swagger_path = swagger_path
classes = self.generate_classes_maybe_cached(local_api_spec)
for class_name, klass in classes.items():
setattr(self, class_name, klass(client=self,
return_type=return_type))
def parse_path(self, path, operations):
""" Parse an endpoint into a class where each valid http request
on that endpoint is converted into a convenience function and
attached to the class as a method.
"""
warnings.warn("This method is deprecated and will be removed in "
"v2.0.0. Use the `_parse_service_path` function "
"instead.")
return _parse_service_path(path, operations, root_path=self._root_path)
def parse_api_spec(self, api_spec):
warnings.warn("This method is deprecated and will be removed in "
"v2.0.0. Use the `parse_service_api_spec` function "
"instead.")
return parse_service_api_spec(api_spec, root_path=self._root_path)
@lru_cache(maxsize=4)
def get_api_spec(self):
swagger_url = self._base_url + self._swagger_path
with requests.Session() as sess:
auth_service_session(sess, self)
response = sess.get(swagger_url)
response.raise_for_status()
spec = response.json(object_pairs_hook=OrderedDict)
return spec
@lru_cache(maxsize=4)
def generate_classes(self):
raw_spec = self.get_api_spec()
spec = JsonRef.replace_refs(raw_spec)
return parse_service_api_spec(spec, root_path=self._root_path)
def get_base_url(self):
service = _get_service(self)
return service['current_url']
def generate_classes_maybe_cached(self, cache):
"""Generate class objects either from /endpoints or a local cache."""
if cache is None:
classes = self.generate_classes()
else:
if isinstance(cache, OrderedDict):
raw_spec = cache
elif isinstance(cache, str):
with open(cache, "r") as f:
raw_spec = json.load(f, object_pairs_hook=OrderedDict)
else:
msg = "cache must be an OrderedDict or str, given {}"
raise ValueError(msg.format(type(cache)))
spec = JsonRef.replace_refs(raw_spec)
classes = parse_service_api_spec(spec, root_path=self._root_path)
return classes
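# Illustrative end-to-end usage (service id, resource and method names are
# assumptions for this sketch):
#   client = ServiceClient(service_id=123, root_path='/api')
#   resp = client.widgets.list_widgets()  # snake_case Response by default
#   raw_client = ServiceClient(service_id=123, return_type='raw')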
| bsd-3-clause | 3,145,371,179,698,265,000 | 39.170404 | 79 | 0.605492 | false |
dekked/dynamodb-mock | tests/functional/pyramid/test_delete_item.py | 1 | 2157 | # -*- coding: utf-8 -*-
import unittest, json
TABLE_NAME = 'Table-HR'
TABLE_NAME_404 = 'Waldo'
TABLE_RT = 45
TABLE_WT = 123
TABLE_HK_NAME = u'hash_key'
TABLE_HK_TYPE = u'N'
TABLE_RK_NAME = u'range_key'
TABLE_RK_TYPE = u'S'
HK_VALUE = u'123'
RK_VALUE = u'Decode this data if you are a coder'
HK = {TABLE_HK_TYPE: HK_VALUE}
RK = {TABLE_RK_TYPE: RK_VALUE}
ITEM = {
TABLE_HK_NAME: HK,
TABLE_RK_NAME: RK,
u'relevant_data': {u'B': u'THVkaWEgaXMgdGhlIGJlc3QgY29tcGFueSBldmVyIQ=='},
}
HEADERS = {
'x-amz-target': 'dynamodb_20111205.DeleteItem',
'content-type': 'application/x-amz-json-1.0',
}
# Goal here is not to test the full API, this is done by the Boto tests
class TestDeleteItem(unittest.TestCase):
def setUp(self):
        from ddbmock.database.db import dynamodb
        from ddbmock.database.table import Table
        from ddbmock.database.key import PrimaryKey
from ddbmock import main
app = main({})
from webtest import TestApp
self.app = TestApp(app)
dynamodb.hard_reset()
hash_key = PrimaryKey(TABLE_HK_NAME, TABLE_HK_TYPE)
range_key = PrimaryKey(TABLE_RK_NAME, TABLE_RK_TYPE)
self.t1 = Table(TABLE_NAME, TABLE_RT, TABLE_WT, hash_key, range_key)
dynamodb.data[TABLE_NAME] = self.t1
self.t1.put(ITEM, {})
def tearDown(self):
from ddbmock.database.db import dynamodb
dynamodb.hard_reset()
def test_delete_item_hr(self):
from ddbmock.database.db import dynamodb
request = {
"TableName": TABLE_NAME,
"Key": {
"HashKeyElement": HK,
"RangeKeyElement": RK,
},
}
expected = {
u"ConsumedCapacityUnits": 1,
}
# Protocol check
res = self.app.post_json('/', request, HEADERS, status=200)
self.assertEqual(expected, json.loads(res.body))
self.assertEqual('application/x-amz-json-1.0; charset=UTF-8', res.headers['Content-Type'])
# Live data check
self.assertNotIn((HK_VALUE, RK_VALUE), self.t1.store)
| lgpl-3.0 | -8,038,244,890,519,528,000 | 27.381579 | 98 | 0.613352 | false |
spinolacastro/openshift-ansible | utils/src/ooinstall/cli_installer.py | 1 | 31532 | # TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
import click
import os
import re
import sys
from ooinstall import openshift_ansible
from ooinstall import OOConfig
from ooinstall.oo_config import OOConfigInvalidHostError
from ooinstall.oo_config import Host
from ooinstall.variants import find_variant, get_variant_version_combos
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
def validate_ansible_dir(path):
if not path:
raise click.BadParameter('An ansible path must be provided')
return path
# if not os.path.exists(path)):
# raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
def is_valid_hostname(hostname):
if not hostname or len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1] # strip exactly one dot from the right, if present
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
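# Illustrative behaviour (hypothetical inputs):
#   is_valid_hostname('master1.example.com')    -> True
#   is_valid_hostname('-bad-label.example.com') -> False (labels must not start or end with '-')
#   is_valid_hostname('')                       -> False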
def validate_prompt_hostname(hostname):
if '' == hostname or is_valid_hostname(hostname):
return hostname
raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
def get_ansible_ssh_user():
click.clear()
message = """
This installation process will involve connecting to remote hosts via ssh. Any
account may be used however if a non-root account is used it must have
passwordless sudo access.
"""
click.echo(message)
return click.prompt('User for ssh access', default='root')
def list_hosts(hosts):
hosts_idx = range(len(hosts))
for idx in hosts_idx:
click.echo(' {}: {}'.format(idx, hosts[idx]))
def delete_hosts(hosts):
while True:
list_hosts(hosts)
del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
'or n/N to add more hosts', default='n')
try:
del_idx = int(del_idx)
hosts.remove(hosts[del_idx])
except IndexError:
click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx))
except ValueError:
try:
response = del_idx.lower()
if response in ['y', 'n']:
return hosts, response
                click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
            except AttributeError:
                click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
return hosts, None
def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
"""
Collect host information from user. This will later be filled in using
ansible.
Returns: a list of host information collected from the user
"""
click.clear()
click.echo('*** Host Configuration ***')
message = """
You must now specify the hosts that will compose your OpenShift cluster.
Please enter an IP or hostname to connect to for each system in the cluster.
You will then be prompted to identify what role you would like this system to
serve in the cluster.
OpenShift Masters serve the API and web console and coordinate the jobs to run
across the environment. If desired you can specify multiple Master systems for
an HA deployment, in which case you will be prompted to identify a *separate*
system to act as the load balancer for your cluster after all Masters and Nodes
are defined.
If only one Master is specified, an etcd instance embedded within the OpenShift
Master service will be used as the datastore. This can be later replaced with a
separate etcd instance if desired. If multiple Masters are specified, a
separate etcd cluster will be configured with each Master serving as a member.
Any Masters configured as part of this installation process will also be
configured as Nodes. This is so that the Master will be able to proxy to Pods
from the API. By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.
OpenShift Nodes provide the runtime environments for containers. They will
host the required services to be managed by the Master.
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
"""
click.echo(message)
hosts = []
more_hosts = True
num_masters = 0
while more_hosts:
host_props = {}
host_props['connect_to'] = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_hostname)
if not masters_set:
if click.confirm('Will this host be an OpenShift Master?'):
host_props['master'] = True
num_masters += 1
if oo_cfg.settings['variant_version'] == '3.0':
masters_set = True
host_props['node'] = True
host_props['containerized'] = False
if oo_cfg.settings['variant_version'] != '3.0':
rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
type=click.Choice(['rpm', 'container']),
default='rpm')
if rpm_or_container == 'container':
host_props['containerized'] = True
if existing_env:
host_props['new_host'] = True
else:
host_props['new_host'] = False
host = Host(**host_props)
hosts.append(host)
if print_summary:
print_installation_summary(hosts, oo_cfg.settings['variant_version'])
# If we have one master, this is enough for an all-in-one deployment,
# thus we can start asking if you wish to proceed. Otherwise we assume
        # more hosts must be added.
if masters_set or num_masters != 2:
more_hosts = click.confirm('Do you want to add additional hosts?')
if num_masters >= 3:
collect_master_lb(hosts)
return hosts
def print_installation_summary(hosts, version=None):
"""
Displays a summary of all hosts configured thus far, and what role each
will play.
Shows total nodes/masters, hints for performing/modifying the deployment
with additional setup, warnings for invalid or sub-optimal configurations.
"""
click.clear()
click.echo('*** Installation Summary ***\n')
click.echo('Hosts:')
for host in hosts:
print_host_summary(hosts, host)
masters = [host for host in hosts if host.master]
nodes = [host for host in hosts if host.node]
dedicated_nodes = [host for host in hosts if host.node and not host.master]
click.echo('')
click.echo('Total OpenShift Masters: %s' % len(masters))
click.echo('Total OpenShift Nodes: %s' % len(nodes))
if len(masters) == 1 and version != '3.0':
ha_hint_message = """
NOTE: Add a total of 3 or more Masters to perform an HA installation."""
click.echo(ha_hint_message)
elif len(masters) == 2:
min_masters_message = """
WARNING: A minimum of 3 masters are required to perform an HA installation.
Please add one more to proceed."""
click.echo(min_masters_message)
elif len(masters) >= 3:
ha_message = """
NOTE: Multiple Masters specified, this will be an HA deployment with a separate
etcd cluster. You will be prompted to provide the FQDN of a load balancer once
finished entering hosts."""
click.echo(ha_message)
dedicated_nodes_message = """
WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
Nodes are specified, each configured Master will be marked as a schedulable
Node."""
min_ha_nodes_message = """
WARNING: A minimum of 3 dedicated Nodes are recommended for an HA
deployment."""
if len(dedicated_nodes) == 0:
click.echo(dedicated_nodes_message)
elif len(dedicated_nodes) < 3:
click.echo(min_ha_nodes_message)
click.echo('')
def print_host_summary(all_hosts, host):
click.echo("- %s" % host.connect_to)
if host.master:
click.echo(" - OpenShift Master")
if host.node:
if host.is_dedicated_node():
click.echo(" - OpenShift Node (Dedicated)")
elif host.is_schedulable_node(all_hosts):
click.echo(" - OpenShift Node")
else:
click.echo(" - OpenShift Node (Unscheduled)")
if host.master_lb:
if host.preconfigured:
click.echo(" - Load Balancer (Preconfigured)")
else:
click.echo(" - Load Balancer (HAProxy)")
if host.master:
if host.is_etcd_member(all_hosts):
click.echo(" - Etcd Member")
else:
click.echo(" - Etcd (Embedded)")
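# Illustrative output shape for a single colocated master (hostname is made up;
# see the click.echo calls above for the exact strings and spacing):
#   Hosts:
#   - master1.example.com
#     - OpenShift Master
#     - OpenShift Node (Unscheduled)
#       - Etcd (Embedded)
#   Total OpenShift Masters: 1
#   Total OpenShift Nodes: 1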
def collect_master_lb(hosts):
"""
Get a valid load balancer from the user and append it to the list of
hosts.
Ensure user does not specify a system already used as a master/node as
this is an invalid configuration.
"""
message = """
Setting up High Availability Masters requires a load balancing solution.
Please provide the FQDN of a host that will be configured as a proxy. This
can be either an existing load balancer configured to balance all masters on
port 8443 or a new host that will have HAProxy installed on it.
If the host provided is not yet configured, a reference haproxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault tolerant, this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
hostname.
"""
click.echo(message)
host_props = {}
# Using an embedded function here so we have access to the hosts list:
def validate_prompt_lb(hostname):
# Run the standard hostname check first:
hostname = validate_prompt_hostname(hostname)
# Make sure this host wasn't already specified:
for host in hosts:
if host.connect_to == hostname and (host.master or host.node):
raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
'please specify a separate host' % hostname)
return hostname
host_props['connect_to'] = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_lb)
install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
host_props['preconfigured'] = not install_haproxy
host_props['master'] = False
host_props['node'] = False
host_props['master_lb'] = True
master_lb = Host(**host_props)
hosts.append(master_lb)
def confirm_hosts_facts(oo_cfg, callback_facts):
hosts = oo_cfg.hosts
click.clear()
message = """
A list of the facts gathered from the provided hosts follows. Because it is
often the case that the hostname for a system inside the cluster is different
from the hostname that is resolvable from the command line or web clients,
these settings cannot be validated automatically.
For some cloud providers the installer is able to gather metadata exposed in
the instance so reasonable defaults will be provided.
Please confirm that they are correct before moving forward.
"""
notes = """
Format:
connect_to,IP,public IP,hostname,public hostname
Notes:
* The installation host is the hostname from the installer's perspective.
* The IP of the host should be the internal IP of the instance.
* The public IP should be the externally accessible IP associated with the instance
* The hostname should resolve to the internal IP from the instances
themselves.
 * The public hostname should resolve to the external IP from hosts outside of
the cloud.
"""
# For testing purposes we need to click.echo only once, so build up
# the message:
output = message
default_facts_lines = []
default_facts = {}
for h in hosts:
if h.preconfigured == True:
continue
default_facts[h.connect_to] = {}
h.ip = callback_facts[h.connect_to]["common"]["ip"]
h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
default_facts_lines.append(",".join([h.connect_to,
h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
output = "%s\n%s" % (output, ",".join([h.connect_to,
h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
output = "%s\n%s" % (output, notes)
click.echo(output)
facts_confirmed = click.confirm("Do the above facts look correct?")
if not facts_confirmed:
message = """
Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
""" % oo_cfg.config_path
click.echo(message)
# Make sure we actually write out the config file.
oo_cfg.save_to_disk()
sys.exit(0)
return default_facts
def check_hosts_config(oo_cfg, unattended):
click.clear()
masters = [host for host in oo_cfg.hosts if host.master]
if len(masters) == 2:
click.echo("A minimum of 3 Masters are required for HA deployments.")
sys.exit(1)
if len(masters) > 1:
master_lb = [host for host in oo_cfg.hosts if host.master_lb]
if len(master_lb) > 1:
click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
sys.exit(1)
elif len(master_lb) == 1:
if master_lb[0].master or master_lb[0].node:
click.echo('ERROR: The Master load balancer is configured as a master or node. Please correct this.')
sys.exit(1)
else:
message = """
ERROR: No master load balancer specified in config. You must provide the FQDN
of a load balancer to balance the API (port 8443) on all Master hosts.
https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
"""
click.echo(message)
sys.exit(1)
dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
if len(dedicated_nodes) == 0:
message = """
WARNING: No dedicated Nodes specified. By default, colocated Masters have
their Nodes set to unschedulable. If you proceed, all nodes will be labelled
as schedulable.
"""
if unattended:
click.echo(message)
else:
confirm_continue(message)
return
def get_variant_and_version(multi_master=False):
message = "\nWhich variant would you like to install?\n\n"
i = 1
combos = get_variant_version_combos()
for (variant, version) in combos:
message = "%s\n(%s) %s %s" % (message, i, variant.description,
version.name)
i = i + 1
message = "%s\n" % message
click.echo(message)
if multi_master:
        click.echo('NOTE: 3.0 installations do not support multiple Masters.')
response = click.prompt("Choose a variant from above: ", default=1)
product, version = combos[response - 1]
return product, version
def confirm_continue(message):
if message:
click.echo(message)
click.confirm("Are you ready to continue?", default=False, abort=True)
return
def error_if_missing_info(oo_cfg):
missing_info = False
if not oo_cfg.hosts:
missing_info = True
click.echo('For unattended installs, hosts must be specified on the '
'command line or in the config file: %s' % oo_cfg.config_path)
sys.exit(1)
if 'ansible_ssh_user' not in oo_cfg.settings:
click.echo("Must specify ansible_ssh_user in configuration file.")
sys.exit(1)
# Lookup a variant based on the key we were given:
if not oo_cfg.settings['variant']:
click.echo("No variant specified in configuration file.")
sys.exit(1)
ver = None
if 'variant_version' in oo_cfg.settings:
ver = oo_cfg.settings['variant_version']
variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
if variant is None or version is None:
err_variant_name = oo_cfg.settings['variant']
if ver:
err_variant_name = "%s %s" % (err_variant_name, ver)
click.echo("%s is not an installable variant." % err_variant_name)
sys.exit(1)
oo_cfg.settings['variant_version'] = version.name
missing_facts = oo_cfg.calc_missing_facts()
if len(missing_facts) > 0:
missing_info = True
click.echo('For unattended installs, facts must be provided for all masters/nodes:')
for host in missing_facts:
click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))
if missing_info:
sys.exit(1)
def get_missing_info_from_user(oo_cfg):
""" Prompts the user for any information missing from the given configuration. """
click.clear()
message = """
Welcome to the OpenShift Enterprise 3 installation.
Please confirm that the following prerequisites have been met:
* All systems where OpenShift will be installed are running Red Hat Enterprise
Linux 7.
* All systems are properly subscribed to the required OpenShift Enterprise 3
repositories.
* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
* All systems have working DNS that resolves not only from the perspective of
the installer but also from within the cluster.
When the process completes you will have a default configuration for Masters
and Nodes. For ongoing environment maintenance it's recommended that the
official Ansible playbooks be used.
For more information on installation prerequisites please see:
https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
"""
confirm_continue(message)
click.clear()
if oo_cfg.settings.get('ansible_ssh_user', '') == '':
oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
click.clear()
if oo_cfg.settings.get('variant', '') == '':
variant, version = get_variant_and_version()
oo_cfg.settings['variant'] = variant.name
oo_cfg.settings['variant_version'] = version.name
click.clear()
if not oo_cfg.hosts:
oo_cfg.hosts = collect_hosts(oo_cfg)
click.clear()
return oo_cfg
def collect_new_nodes(oo_cfg):
click.clear()
click.echo('*** New Node Configuration ***')
message = """
Add new nodes here
"""
click.echo(message)
return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
for host in hosts:
if(host.connect_to in callback_facts.keys()
and 'common' in callback_facts[host.connect_to].keys()
and callback_facts[host.connect_to]['common'].get('version', '')
and callback_facts[host.connect_to]['common'].get('version', '') != 'None'):
installed_hosts.append(host)
return installed_hosts
# pylint: disable=too-many-branches
# This pylint error will be corrected shortly in separate PR.
def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# Copy the list of existing hosts so we can remove any already installed nodes.
hosts_to_run_on = list(oo_cfg.hosts)
# Check if master or nodes already have something installed
installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
if len(installed_hosts) > 0:
click.echo('Installed environment detected.')
# This check has to happen before we start removing hosts later in this method
if not force:
if not unattended:
click.echo('By default the installer only adds new nodes ' \
'to an installed environment.')
response = click.prompt('Do you want to (1) only add additional nodes or ' \
'(2) reinstall the existing hosts ' \
'potentially erasing any custom changes?',
type=int)
# TODO: this should be reworked with error handling.
# Click can certainly do this for us.
# This should be refactored as soon as we add a 3rd option.
if response == 1:
force = False
if response == 2:
force = True
# present a message listing already installed hosts and remove hosts if needed
for host in installed_hosts:
if host.master:
click.echo("{} is already an OpenShift Master".format(host))
# Masters stay in the list, we need to run against them when adding
# new nodes.
elif host.node:
click.echo("{} is already an OpenShift Node".format(host))
# force is only used for reinstalls so we don't want to remove
# anything.
if not force:
hosts_to_run_on.remove(host)
# Handle the cases where we know about uninstalled systems
new_hosts = set(hosts_to_run_on) - set(installed_hosts)
if len(new_hosts) > 0:
for new_host in new_hosts:
click.echo("{} is currently uninstalled".format(new_host))
# Fall through
click.echo('Adding additional nodes...')
else:
if unattended:
if not force:
click.echo('Installed environment detected and no additional ' \
'nodes specified: aborting. If you want a fresh install, use ' \
'`atomic-openshift-installer install --force`')
sys.exit(1)
else:
if not force:
new_nodes = collect_new_nodes(oo_cfg)
hosts_to_run_on.extend(new_nodes)
oo_cfg.hosts.extend(new_nodes)
openshift_ansible.set_config(oo_cfg)
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
if error:
click.echo("There was a problem fetching the required information. See " \
"{} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
else:
pass # proceeding as normal should do a clean install
return hosts_to_run_on, callback_facts
@click.group()
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
type=click.Path(file_okay=True,
dir_okay=False,
writable=True,
readable=True),
default=None)
@click.option('--ansible-playbook-directory',
'-a',
type=click.Path(exists=True,
file_okay=False,
dir_okay=True,
readable=True),
# callback=validate_ansible_dir,
default=DEFAULT_PLAYBOOK_DIR,
envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-config',
type=click.Path(file_okay=True,
dir_okay=False,
writable=True,
readable=True),
default=None)
@click.option('--ansible-log-path',
type=click.Path(file_okay=True,
dir_okay=False,
writable=True,
readable=True),
default="/tmp/ansible.log")
@click.option('-v', '--verbose',
is_flag=True, default=False)
#pylint: disable=too-many-arguments
#pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
"""
atomic-openshift-installer makes the process for installing OSE or AEP
easier by interactively gathering the data needed to run on each host.
It can also be run in unattended mode if provided with a configuration file.
Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
"""
ctx.obj = {}
ctx.obj['unattended'] = unattended
ctx.obj['configuration'] = configuration
ctx.obj['ansible_config'] = ansible_config
ctx.obj['ansible_log_path'] = ansible_log_path
ctx.obj['verbose'] = verbose
try:
oo_cfg = OOConfig(ctx.obj['configuration'])
except OOConfigInvalidHostError as e:
click.echo(e)
sys.exit(1)
# If no playbook dir on the CLI, check the config:
if not ansible_playbook_directory:
ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
# If still no playbook dir, check for the default location:
if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
validate_ansible_dir(ansible_playbook_directory)
oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
oo_cfg.ansible_playbook_directory = ansible_playbook_directory
ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory
if ctx.obj['ansible_config']:
oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
elif 'ansible_config' not in oo_cfg.settings and \
os.path.exists(DEFAULT_ANSIBLE_CONFIG):
# If we're installed by RPM this file should exist and we can use it as our default:
oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
ctx.obj['oo_cfg'] = oo_cfg
openshift_ansible.set_config(oo_cfg)
@click.command()
@click.pass_context
def uninstall(ctx):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
click.echo("OpenShift will be uninstalled from the following hosts:\n")
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
for host in oo_cfg.hosts:
click.echo(" * %s" % host.connect_to)
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Uninstall cancelled.")
sys.exit(0)
openshift_ansible.run_uninstall_playbook(verbose)
@click.command()
@click.pass_context
def upgrade(ctx):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
    # Update config to reflect the version we're targeting; we'll write
# to disk once ansible completes successfully, not before.
old_variant = oo_cfg.settings['variant']
old_version = oo_cfg.settings['variant_version']
if oo_cfg.settings['variant'] == 'enterprise':
oo_cfg.settings['variant'] = 'openshift-enterprise'
version = find_variant(oo_cfg.settings['variant'])[1]
oo_cfg.settings['variant_version'] = version.name
    click.echo("OpenShift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
old_variant, old_version, oo_cfg.settings['variant'],
oo_cfg.settings['variant_version']))
for host in oo_cfg.hosts:
click.echo(" * %s" % host.connect_to)
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Upgrade cancelled.")
sys.exit(0)
retcode = openshift_ansible.run_upgrade_playbook(verbose)
if retcode > 0:
click.echo("Errors encountered during upgrade, please check %s." %
oo_cfg.settings['ansible_log_path'])
else:
oo_cfg.save_to_disk()
click.echo("Upgrade completed! Rebooting all hosts is recommended.")
@click.command()
@click.option('--force', '-f', is_flag=True, default=False)
@click.pass_context
def install(ctx, force):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
if ctx.obj['unattended']:
error_if_missing_info(oo_cfg)
else:
oo_cfg = get_missing_info_from_user(oo_cfg)
check_hosts_config(oo_cfg, ctx.obj['unattended'])
print_installation_summary(oo_cfg.hosts, oo_cfg.settings.get('variant_version', None))
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
verbose)
if error:
click.echo("There was a problem fetching the required information. " \
"Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
hosts_to_run_on, callback_facts = get_hosts_to_run_on(
oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
click.echo('Writing config to: %s' % oo_cfg.config_path)
# We already verified this is not the case for unattended installs, so this can
# only trigger for live CLI users:
# TODO: if there are *new* nodes and this is a live install, we may need the user
# to confirm the settings for new nodes. Look into this once we're distinguishing
# between new and pre-existing nodes.
if len(oo_cfg.calc_missing_facts()) > 0:
confirm_hosts_facts(oo_cfg, callback_facts)
oo_cfg.save_to_disk()
click.echo('Ready to run installation process.')
message = """
If changes are needed please edit the config file above and re-run.
"""
if not ctx.obj['unattended']:
confirm_continue(message)
error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
hosts_to_run_on, verbose)
if error:
# The bootstrap script will print out the log location.
message = """
An error was detected. After resolving the problem please relaunch the
installation process.
"""
click.echo(message)
sys.exit(1)
else:
message = """
The installation was successful!
If this is your first time installing please take a look at the Administrator
Guide for advanced options related to routing, storage, authentication and much
more:
http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
"""
click.echo(message)
click.pause()
cli.add_command(install)
cli.add_command(upgrade)
cli.add_command(uninstall)
if __name__ == '__main__':
# This is expected behaviour for context passing with click library:
# pylint: disable=unexpected-keyword-arg
cli(obj={})
| apache-2.0 | -6,367,065,744,073,844,000 | 37.594859 | 120 | 0.636845 | false |
Murithi/lacuna | config/wsgi.py | 1 | 1548 | """
WSGI config for lacuna project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause | -966,954,807,840,109,800 | 39.736842 | 79 | 0.795866 | false |
lemiere/python-lecture | tp_stat/exemples/boucle_for.py | 1 | 1274 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Lemiere Yves
# Juillet 2017
def main():
debug = True
if debug:
print("*************************")
print("* Welcome in boucle_for *")
print("*************************\n")
    # This is a list of strings:
my_list_of_heroes = ['Spider-Man','Daredevil','Iron Man','Flash','Wonder Woman']
print(my_list_of_heroes)
    # The variable 'hero' takes each value of the list my_list_of_heroes in turn:
iterator = 0
for hero in my_list_of_heroes:
print (iterator)
print ("my current hero is {}".format(hero))
iterator = iterator + 1
print("Finished with {} heroes ".format(iterator))
print("Finished with {} heroes ".format(len(my_list_of_heroes)))
    # This loop starts at index 2, i.e. from the third element of the list:
iterator = 0
for hero in my_list_of_heroes[2:]:
print (iterator)
print ("my current hero is {}".format(hero))
iterator = iterator + 1
print("Finished with {} heroes ".format(iterator))
print("Finished with {} heroes ".format(len(my_list_of_heroes)))
return
main()
| gpl-3.0 | 3,050,231,175,831,166,500 | 26.586957 | 93 | 0.547675 | false |
wcmitchell/insights-core | insights/parsers/yum_conf.py | 1 | 2326 | """
YumConf - file ``/etc/yum.conf``
================================
This module provides parsing for the ``/etc/yum.conf`` file.
The ``YumConf`` class parses the information in the file
``/etc/yum.conf``. See the ``IniConfigFile`` class for more
information on attributes and methods.
Sample input data looks like::
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
debuglevel=2
logfile=/var/log/yum.log
exactarch=1
obsoletes=1
gpgcheck=1
plugins=1
installonly_limit=3
[rhel-7-server-rpms]
metadata_expire = 86400
baseurl = https://cdn.redhat.com/content/rhel/server/7/$basearch
name = Red Hat Enterprise Linux 7 Server (RPMs)
gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
enabled = 1
gpgcheck = 1
Examples:
>>> yconf = shared[YumConf]
>>> yconf.defaults()
    {}
>>> 'main' in yconf
True
>>> 'rhel-7-server-rpms' in yconf
True
>>> yconf.has_option('main', 'gpgcheck')
True
>>> yconf.has_option('main', 'foo')
False
>>> yconf.get('rhel-7-server-rpms', 'enabled')
'1'
>>> yconf.items('main')
{'plugins': '1',
'keepcache': '0',
'cachedir': '/var/cache/yum/$basearch/$releasever',
'exactarch': '1',
'obsoletes': '1',
'installonly_limit': '3',
'debuglevel': '2',
'gpgcheck': '1',
'logfile': '/var/log/yum.log'}
"""
from insights.contrib.ConfigParser import NoOptionError
from .. import parser, IniConfigFile
from insights.specs import yum_conf
@parser(yum_conf)
class YumConf(IniConfigFile):
"""Parse contents of file ``/etc/yum.conf``."""
def parse_content(self, content):
super(YumConf, self).parse_content(content)
# File /etc/yum.conf may contain repos definitions.
# Keywords 'gpgkey' and 'baseurl' might contain multiple
# values separated by comma. Convert those values into a list.
for section in self.sections():
for key in ('gpgkey', 'baseurl'):
try:
value = self.get(section, key)
if value and isinstance(value, str):
self.data.set(section, key, value.split(','))
except NoOptionError:
pass
| apache-2.0 | -8,295,788,500,542,155,000 | 28.075 | 70 | 0.596733 | false |
joshua-cogliati-inl/raven | rook/TextDiff.py | 1 | 5519 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This implements a text differ.
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import difflib
from Tester import Differ
class TextDiff:
""" TextDiff is used for comparing a bunch of xml files.
"""
def __init__(self, out_files, gold_files, **kwargs):
"""
Create a TextDiff class
@ In, out_files, string list, the files to be compared.
@ In, gold_files, string list, the gold files to compare to the outfiles
@ In, kwargs, dictionary, other arguments that may be included:
- 'comment': indicates the character or string that should be used to denote a comment line
@ Out, None
"""
assert len(out_files) == len(gold_files)
self.__out_files = out_files
self.__gold_files = gold_files
self.__messages = ""
self.__same = True
self.__options = kwargs
def diff(self):
"""
Run the comparison.
@ In, None
@ Out, (same,messages), (boolean, string), where same is true if all
the txt files are the same, and messages is a string with all
the differences.
"""
# read in files
comment_symbol = self.__options['comment']
for test_filename, gold_filename in zip(self.__out_files, self.__gold_files):
if not os.path.exists(test_filename):
self.__same = False
self.__messages += 'Test file does not exist: '+test_filename
elif not os.path.exists(gold_filename):
self.__same = False
self.__messages += 'Gold file does not exist: '+gold_filename
else:
files_read = True
try:
test_file = open(test_filename)
test_lines = [line.split(comment_symbol, 1)[0].strip()
if len(comment_symbol) > 0
else line for line in test_file]
test_lines = [line for line in test_lines if len(line) > 0]
test_file.close()
except Exception as exp:
self.__same = False
self.__messages += "Error reading " + test_filename + ":" + str(exp) + " "
files_read = False
try:
gold_file = open(gold_filename)
gold_lines = [line.split(comment_symbol, 1)[0].strip()
if len(comment_symbol) > 0
else line for line in gold_file]
gold_lines = [line for line in gold_lines if len(line) > 0]
gold_file.close()
except Exception as exp:
self.__same = False
self.__messages += "Error reading " + gold_filename + ":" + str(exp) + " "
files_read = False
if files_read:
diff = list(difflib.unified_diff(test_lines, gold_lines))
# deletions = [ line for line in diff if line.startswith('-')]
# additions = [ line for line in diff if line.startswith('+')]
if len(diff):
self.__same = False
separator = "\n"+" "*4
self.__messages += "Mismatch between "+test_filename+" and "+gold_filename+separator
#truncation prevents too much output
self.__messages += separator.join(diff[2:8]) + separator+'...' + "\n"
if '[' in self.__messages or ']' in self.__messages:
self.__messages = self.__messages.replace('[', '(')
self.__messages = self.__messages.replace(']', ')')
return (self.__same, self.__messages)
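# Illustrative usage of TextDiff (file paths are made up):
#   differ = TextDiff(['out/summary.txt'], ['gold/summary.txt'], comment='#')
#   same, messages = differ.diff()
#   # `same` is True when the stripped, comment-free lines match; otherwise
#   # `messages` carries a truncated unified-diff excerpt per mismatching file.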
class Text(Differ):
"""
This is the class to use for handling the Text block.
"""
@staticmethod
def get_valid_params():
"""
Returns the parameters that this class can use.
@ In, None
@ Out, params, _ValidParameters, return the parameters.
"""
params = Differ.get_valid_params()
params.add_param('comment', '-20021986', "Character or string denoting "+
"comments, all text to the right of the symbol will be "+
"ignored in the diff of text files")
return params
def __init__(self, name, params, test_dir):
"""
Initializer for the class. Takes a String name and a dictionary params
@ In, name, string, name of the test.
@ In, params, dictionary, parameters for the class
@ In, test_dir, string, path to the test.
@ Out, None.
"""
Differ.__init__(self, name, params, test_dir)
self.__text_opts = {'comment': self.specs['comment']}
#self.__text_files = self.specs['output'].split()
def check_output(self):
"""
Checks that the output matches the gold.
      returns (same, message) where same is true if the
      test passes, or false if the test fails. message should
      give a human readable explanation of the differences.
@ In, None
@ Out, (same, message), same is true if the tests passes.
"""
text_files = self._get_test_files()
gold_files = self._get_gold_files()
text_diff = TextDiff(text_files, gold_files, **self.__text_opts)
return text_diff.diff()
| apache-2.0 | 427,634,132,905,938,750 | 38.421429 | 99 | 0.608081 | false |
pressbooks/trellis | lib/trellis/plugins/callback/vars.py | 1 | 4548 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from __main__ import cli
from ansible.module_utils.six import iteritems
from ansible.errors import AnsibleError
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.playbook.play_context import PlayContext
from ansible.playbook.task import Task
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
from ansible.utils.unsafe_proxy import wrap_var
class CallbackModule(CallbackBase):
''' Creates and modifies play and host variables '''
CALLBACK_VERSION = 2.0
CALLBACK_NAME = 'vars'
def __init__(self):
super(CallbackModule, self).__init__()
# handle Ansible 2.7 and 2.8 cases by normalizing each into a dict
try:
from ansible import context
self._options = context.CLIARGS
except ImportError:
self._options = vars(cli.options) if cli else {}
def raw_triage(self, key_string, item, patterns):
# process dict values
if isinstance(item, AnsibleMapping):
return AnsibleMapping(dict((key,self.raw_triage('.'.join([key_string, key]), value, patterns)) for key,value in iteritems(item)))
# process list values
elif isinstance(item, AnsibleSequence):
return AnsibleSequence([self.raw_triage('.'.join([key_string, str(i)]), value, patterns) for i,value in enumerate(item)])
# wrap values if they match raw_vars pattern
elif isinstance(item, AnsibleUnicode):
match = next((pattern for pattern in patterns if re.match(pattern, key_string)), None)
return wrap_var(item) if match else item
else:
return item
def raw_vars(self, play, host, hostvars):
if 'raw_vars' not in hostvars:
return
raw_vars = Templar(variables=hostvars, loader=play._loader).template(hostvars['raw_vars'])
if not isinstance(raw_vars, list):
raise AnsibleError('The `raw_vars` variable must be defined as a list.')
patterns = [re.sub(r'\*', '(.)*', re.sub(r'\.', '\.', var)) for var in raw_vars if var.split('.')[0] in hostvars]
keys = set(pattern.split('\.')[0] for pattern in patterns)
for key in keys:
if key in play.vars:
play.vars[key] = self.raw_triage(key, play.vars[key], patterns)
elif key in hostvars:
host.vars[key] = self.raw_triage(key, hostvars[key], patterns)
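    # Illustrative example (not part of the original plugin): a group_vars
    # entry such as
    #
    #   raw_vars:
    #     - wordpress_sites.*.env
    #
    # becomes the regex 'wordpress_sites\.(.)*\.env'. Any string value whose
    # dotted key path matches that prefix (for example the hypothetical
    # wordpress_sites.example.com.env.wp_home) is wrapped with wrap_var() so
    # its contents are treated as unsafe and skipped by Jinja2 templating.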
def cli_options(self):
options = []
strings = {
'--connection': 'connection',
'--private-key': 'private_key_file',
'--ssh-common-args': 'ssh_common_args',
'--ssh-extra-args': 'ssh_extra_args',
'--timeout': 'timeout',
'--vault-password-file': 'vault_password_file',
}
for option,value in iteritems(strings):
if self._options.get(value, False):
options.append("{0}='{1}'".format(option, str(self._options.get(value))))
for inventory in self._options.get('inventory'):
options.append("--inventory='{}'".format(str(inventory)))
if self._options.get('ask_vault_pass', False):
options.append('--ask-vault-pass')
return ' '.join(options)
def darwin_without_passlib(self):
if not sys.platform.startswith('darwin'):
return False
try:
import passlib.hash
return False
except:
return True
def v2_playbook_on_play_start(self, play):
env = play.get_variable_manager().get_vars(play=play).get('env', '')
env_group = next((group for key,group in iteritems(play.get_variable_manager()._inventory.groups) if key == env), False)
if env_group:
env_group.set_priority(20)
for host in play.get_variable_manager()._inventory.list_hosts(play.hosts[0]):
hostvars = play.get_variable_manager().get_vars(play=play, host=host)
self.raw_vars(play, host, hostvars)
host.vars['ssh_args_default'] = PlayContext(play=play)._ssh_args.default
host.vars['cli_options'] = self.cli_options()
host.vars['cli_ask_pass'] = self._options.get('ask_pass', False)
host.vars['cli_ask_become_pass'] = self._options.get('become_ask_pass', False)
host.vars['darwin_without_passlib'] = self.darwin_without_passlib()
| mit | 7,478,106,766,313,598,000 | 38.894737 | 141 | 0.616755 | false |
mattbasta/amo-validator | tests/compat/test_tb7.py | 1 | 1805 | from helper import CompatTestCase
from validator.compat import TB7_DEFINITION
class TestTB7Compat(CompatTestCase):
"""Test that compatibility tests for Thunderbird 7 are properly executed."""
VERSION = TB7_DEFINITION
def test_nsIMsgThread(self):
for method in self.run_xpcom_for_compat(
"nsIMsgThread", ["GetChildAt()"]):
self.assert_silent()
self.assert_compat_error(type_="notice")
def test_mail_attachment(self):
"""Test that the old mail attachment global functions are flagged."""
functions = ["createNewAttachmentInfo",
"saveAttachment",
"attachmentIsEmpty",
"openAttachment",
"detachAttachment",
"cloneAttachment"]
for function in functions:
self.run_script_for_compat("%s()" % function)
self.assert_silent()
self.assert_compat_error(type_="notice")
def test_dictUtils_removal(self):
"""Test that dictUtils.js imports are flagged."""
self.run_script_for_compat(
'Components.utils.import("resource:///modules/dictUtils.js");')
self.assert_silent()
self.assert_compat_error(type_="warning")
def test_deRDF_addressbook(self):
"""Test that addressbook RDF sources are flagged."""
self.run_script_for_compat("""
var x = 'datasources="rdf:addressdirectory" ref="moz-abdirectory://"';
""")
self.assert_silent()
self.assert_compat_error(type_="notice")
self.run_script_for_compat("""
var x = 'GetResource(SomeText).QueryInterface(6inTestxnsIAbDirectory);';
""")
self.assert_silent()
self.assert_compat_error(type_="notice")
| bsd-3-clause | -2,783,248,874,788,189,700 | 35.836735 | 80 | 0.600554 | false |
renatopp/liac-chess | tests/test_queen_movement.py | 1 | 5033 | import unittest
from .helpers import StubBoard, StubPiece, C, WHITE, BLACK
class TestQueenMovement(unittest.TestCase):
def get_queen(self, board, team, position):
from chess.models import Queen
return Queen(board, team, position)
# VALID MOVES -------------------------------------------------------------
def test_valid_vertical(self):
board = StubBoard()
# Top
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('e8'))
self.assertTrue(result)
# Bottom
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('e1'))
self.assertTrue(result)
def test_valid_horizontal(self):
board = StubBoard()
# Right
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('h4'))
self.assertTrue(result)
# Left
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('a4'))
self.assertTrue(result)
# -------------------------------------------------------------------------
# INVALID MOVES -----------------------------------------------------------
def test_invalid_obstructedvertical(self):
board = StubBoard()
board[C('e7')] = StubPiece(board, WHITE, C('e7'))
board[C('e2')] = StubPiece(board, WHITE, C('e2'))
# Top
queen = self.get_queen(board, BLACK, C('e4'))
result = queen.is_valid_move(C('e8'))
self.assertFalse(result)
# Bottom
queen = self.get_queen(board, BLACK, C('e4'))
result = queen.is_valid_move(C('e1'))
self.assertFalse(result)
def test_invalid_obstructedhorizontal(self):
board = StubBoard()
board[C('g4')] = StubPiece(board, WHITE, C('g4'))
board[C('b4')] = StubPiece(board, WHITE, C('b4'))
# Right
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('h4'))
self.assertFalse(result)
# Left
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('a4'))
self.assertFalse(result)
def test_invalid_samepos(self):
board = StubBoard()
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('e4'))
self.assertFalse(result)
# -------------------------------------------------------------------------
# VALID MOVES -------------------------------------------------------------
def test_valid_topright(self):
board = StubBoard()
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('h7'))
self.assertTrue(result)
def test_valid_topleft(self):
board = StubBoard()
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('a8'))
self.assertTrue(result)
def test_valid_bottomleft(self):
board = StubBoard()
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('b1'))
self.assertTrue(result)
def test_valid_bottomright(self):
board = StubBoard()
queen = self.get_queen(board, WHITE, C('e4'))
result = queen.is_valid_move(C('h1'))
self.assertTrue(result)
def test_valid_topright2(self):
board = StubBoard()
board[C('c2')] = StubPiece(board, WHITE, C('c2'))
board[C('e2')] = StubPiece(board, WHITE, C('e2'))
queen = self.get_queen(board, WHITE, C('c1'))
result = queen.is_valid_move(C('f4'))
self.assertTrue(result)
# -------------------------------------------------------------------------
# INVALID MOVES -----------------------------------------------------------
def test_invalid_obstructed_topright(self):
board = StubBoard()
board[C('g6')] = StubPiece(board, WHITE, C('g6'))
queen = self.get_queen(board, BLACK, C('e4'))
result = queen.is_valid_move(C('h7'))
self.assertFalse(result)
def test_invalid_obstructed_topleft(self):
board = StubBoard()
board[C('b7')] = StubPiece(board, WHITE, C('b7'))
queen = self.get_queen(board, BLACK, C('e4'))
result = queen.is_valid_move(C('a8'))
self.assertFalse(result)
def test_invalid_obstructed_bottomleft(self):
board = StubBoard()
board[C('c2')] = StubPiece(board, WHITE, C('c2'))
queen = self.get_queen(board, BLACK, C('e4'))
result = queen.is_valid_move(C('b1'))
self.assertFalse(result)
def test_invalid_obstructed_bottomright(self):
board = StubBoard()
board[C('g2')] = StubPiece(board, WHITE, C('g2'))
queen = self.get_queen(board, BLACK, C('e4'))
result = queen.is_valid_move(C('h1'))
self.assertFalse(result)
# -------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main() | mit | 316,861,248,873,598,000 | 32.118421 | 79 | 0.509239 | false |
HarrieO/PairwisePreferenceMultileave | utils/argparsers/multileaveargparser.py | 1 | 1774 | # -*- coding: utf-8 -*-
import sys
import os
import argparse
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from utils.argparsers.simulationargparser import SimulationArgumentParser
class MultileaveArgumentParser(SimulationArgumentParser):
def __init__(self, description=None, set_arguments={}):
set_arguments['print_feature_count'] = False
super(MultileaveArgumentParser, self).__init__(description=description,
set_arguments=set_arguments)
# self.set_argument_namespace('MultileaveArgumentParser')
# self.add_argument('--bias', dest='bias_experiment', action='store_true', required=False,
# default=False, help='Flag for bias experiment.')
# self.add_argument('--k --n_results', dest='k', default=10, type=int,
# help='Number of results shown after each query.')
self.add_argument('--n_rankers', dest='n_rankers', required=True, type=int,
help='Number of rankers to use in simulation.')
# def get_multileave_args(self, args):
# return self.get_args(args, 'MultileaveArgumentParser')
# def parse_args_rec(self):
# output_str, args, sim_args = super(MultileaveArgumentParser, self).parse_args_rec()
# multileave_args = self.get_multileave_args(args)
# if not sim_args.no_run_details:
# output_str += '\nMultileave Arguments'
# output_str += '\n---------------------'
# for name, value in vars(multileave_args).items():
# output_str += '\n%s %s' % (name, value)
# output_str += '\n---------------------'
# return output_str, args, sim_args, multileave_args
| mit | 1,622,209,415,317,331,000 | 44.487179 | 98 | 0.590192 | false |
deepmind/sonnet | sonnet/src/nets/cifar10_convnet_test.py | 1 | 4147 | # Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.v2.src.nets.cifar10_convnet."""
from absl.testing import parameterized
import numpy as np
from sonnet.src import test_utils
from sonnet.src.nets import cifar10_convnet
import tensorflow as tf
class ModelTest(parameterized.TestCase, test_utils.TestCase):
def testModelCreation(self):
convnet = cifar10_convnet.Cifar10ConvNet()
self.assertLen(convnet.submodules, 45)
def testFailedModelCreation(self):
with self.assertRaisesRegex(
ValueError,
'The length of `output_channels` and `strides` must be equal.'):
cifar10_convnet.Cifar10ConvNet(strides=(1, 2, 3), output_channels=(1,))
@parameterized.parameters({'batch_size': 1}, {'batch_size': 4},
{'batch_size': 128})
def testModelForwards(self, batch_size):
image_batch = tf.constant(
np.random.randn(batch_size, 24, 24, 3), dtype=tf.float32)
convnet = cifar10_convnet.Cifar10ConvNet()
output = convnet(image_batch, is_training=True)
self.assertLen(convnet.variables, 112)
self.assertEqual(output['logits'].shape, [batch_size, 10])
# One intermediate activation per conv layer, plus one after the global
# mean pooling, before the linear.
self.assertLen(output['activations'], 12)
@parameterized.parameters({'batch_size': 1}, {'batch_size': 4},
{'batch_size': 128})
def testModelForwardsFunction(self, batch_size):
image_batch = tf.constant(
np.random.randn(batch_size, 24, 24, 3), dtype=tf.float32)
convnet = cifar10_convnet.Cifar10ConvNet()
convnet_function = tf.function(convnet)
output = convnet_function(image_batch, is_training=True)
self.assertLen(convnet.variables, 112)
self.assertEqual(output['logits'].shape, [batch_size, 10])
# One intermediate activation per conv layer, plus one after the global
# mean pooling, before the linear.
self.assertLen(output['activations'], 12)
def testDifferentSizedImages(self):
# Due to global average pooling, different sized images should work fine
# as long they are above some minimum size.
convnet = cifar10_convnet.Cifar10ConvNet()
small_image = tf.constant(np.random.randn(4, 32, 32, 3), dtype=tf.float32)
small_output = convnet(small_image, is_training=True)
self.assertEqual(small_output['logits'].shape, [4, 10])
# Change height, width and batch size
big_image = tf.constant(np.random.randn(12, 64, 64, 3), dtype=tf.float32)
big_output = convnet(big_image, is_training=True)
self.assertEqual(big_output['logits'].shape, [12, 10])
def testDefunBackProp(self):
convnet = cifar10_convnet.Cifar10ConvNet()
@tf.function
def do_training_step(image, labels):
with tf.GradientTape() as tape:
logits = convnet(image, is_training=True)['logits']
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
grads = tape.gradient(loss, convnet.trainable_variables)
return loss, grads
image = tf.random.normal([4, 32, 32, 3])
labels = np.random.randint(low=0, high=10, size=[4], dtype=np.int64)
loss, grads = do_training_step(image, labels)
self.assertEqual(loss.numpy().shape, ())
for grad, var in zip(grads, convnet.trainable_variables):
self.assertIsNotNone(grad)
self.assertEqual(grad.numpy().shape, var.shape)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -676,603,326,283,469,400 | 38.875 | 78 | 0.680733 | false |
alexhunsley/nsconf-video-file-renamer | nsConfVidsRenamer.py | 1 | 1816 | # NSConf videos renamer
# Alex Hunsley 2013
#
# This quick hack fixes the WTF that is the file naming of the NSConf videos.
# Note that the files are renamed by making copies, rather than
# renamed in place, to avoid annoying irreversible screwups.
#
import csv
import os.path
import sys
import string
import shutil
# source vid files
vidsFolder = "allVidsUnzipped"
# destination vid files (look here for the renamed goodness)
renamedVidsFolder = "allVidsRenamed"
if os.path.exists(renamedVidsFolder):
shutil.rmtree(renamedVidsFolder)
os.makedirs(renamedVidsFolder)
# This file should have been provided alongside this script.
# It's metadata created from the NSConf vids download page. Which is itself
# inconsistent in the format of data it provides, and some stuff near the end
# is in the wrong order. What do I look like to you, the unix sort command?
csvfile = open('NSConfVidsSummary1.csv', 'rb')
validFilenameChars = "-_.() %s%s" % (string.ascii_letters, string.digits)
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
firstRow = True
for row in reader:
if (firstRow):
firstRow = False
continue
vidFilename = row[8]
description = row[0]
vidIndex = vidFilename[:2]
authors = row[1]
baseName = row[9]
if (len(authors) == 0):
authors = "Misc"
fullFilename = "%s - %02d %s (%s).%s" % (baseName, int(vidIndex), description, authors, "m4v")
fullFilename = ''.join(c for c in fullFilename if c in validFilenameChars)
fullDestinationFilename = "%s/%s" % (renamedVidsFolder, fullFilename)
fullSourceFilename = "%s/%s.m4v" % (vidsFolder, vidFilename)
print "%s --> %s" % (fullSourceFilename, fullDestinationFilename)
try:
shutil.copyfile(fullSourceFilename, fullDestinationFilename)
except IOError:
print "****** Warning: file not found: %s" % fullSourceFilename
| mit | -1,552,988,196,664,735,200 | 27.375 | 95 | 0.73348 | false |
Azarn/mytodo | todo/tests.py | 1 | 7780 | from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.contrib.auth import get_user_model
from rest_framework.test import APIRequestFactory, force_authenticate
from rest_framework import status
from .models import Category, Todo, Profile
from .views import CategoryDetail, CategoryList, TagDetail, TagList, TodoDetail, TodoList
class DefaultCategoryTestCase(TestCase):
def setUp(self):
self.user = get_user_model().objects.create(username='user')
def _check_exists(self):
return Category.objects.filter(user=self.user, name=Category.DEFAULT_NAME).exists()
def test_default_category_getting(self):
self.assertIsNone(Category.get_default_category(self.user))
category = Category.get_or_create_default(self.user)
self.assertEqual(category, Category.get_default_category(self.user))
Category.delete_default_if_empty(self.user)
self.assertIsNone(Category.get_default_category(self.user))
def test_default_category_creation_deletion(self):
self.assertFalse(self._check_exists())
Category.get_or_create_default(self.user)
self.assertTrue(self._check_exists())
Category.delete_default_if_empty(self.user)
self.assertFalse(self._check_exists())
def test_todo_empty_category(self):
todo = Todo.objects.create(user=self.user, text='Test todo', deadline=timezone.now())
self.assertEqual(todo.category, Category.objects.get(user=self.user, name=Category.DEFAULT_NAME))
test_category = Category.objects.create(user=self.user, name='Test category')
todo.category = test_category
todo.save()
self.assertFalse(self._check_exists())
todo.reset_category()
self.assertTrue(self._check_exists())
todo.category = test_category
todo.save()
self.assertFalse(self._check_exists())
test_category.delete()
self.assertTrue(self._check_exists())
todo = Todo.objects.get(pk=todo.pk)
self.assertEqual(todo.category, Category.get_default_category(todo.user))
todo.delete()
self.assertFalse(self._check_exists())
class ApiUserRestrictionCategoryListTestCase(TestCase):
def setUp(self):
self.my_user = get_user_model().objects.create(username='my_user')
Profile(user=self.my_user).save()
self.other_user = get_user_model().objects.create(username='other_user')
Profile(user=self.other_user).save()
self.my_category = Category.objects.create(user=self.my_user, name='Test my category')
self.other_category = Category.objects.create(user=self.other_user, name='Test my category')
self.factory = APIRequestFactory()
self.url = reverse('todo:category-list')
self.view = CategoryList.as_view()
def test_category_list_methods_not_authenticated(self):
request = self.factory.get(self.url, format='json')
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
request = self.factory.post(self.url, format='json')
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_category_list_get(self):
request = self.factory.get(self.url, format='json')
force_authenticate(request, self.my_user, self.my_user.auth_token)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data['results'], [{'id': self.my_category.id, 'name': self.my_category.name}])
def test_category_list_create(self):
data = {'name': 'New category', 'id': 100, 'user': self.other_user.id}
request = self.factory.post(self.url, data, format='json')
force_authenticate(request, self.my_user, self.my_user.auth_token)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Category.objects.all().count(), 3)
new_category = Category.objects.get(pk=response.data['id'])
self.assertNotEqual(new_category.id, data['id'])
self.assertNotEqual(new_category.user_id, self.other_user.id)
self.assertEqual(new_category.name, data['name'])
self.assertEqual(response.data, {'id': new_category.id, 'name': data['name']})
class ApiUserRestrictionCategoryDetailTestCase(TestCase):
def setUp(self):
self.my_user = get_user_model().objects.create(username='my_user')
Profile(user=self.my_user).save()
self.other_user = get_user_model().objects.create(username='other_user')
Profile(user=self.other_user).save()
self.my_category = Category.objects.create(user=self.my_user, name='Test my category')
self.other_category = Category.objects.create(user=self.other_user, name='Test my category')
self.factory = APIRequestFactory()
self.view = CategoryDetail.as_view()
self.urls = [reverse('todo:category-detail', args=(self.my_category.id,)),
reverse('todo:category-detail', args=(self.other_category.id,))]
def test_category_detail_methods_not_authenticated(self):
for url in self.urls:
request = self.factory.get(url, format='json')
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
request = self.factory.put(url, format='json')
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
request = self.factory.delete(url, format='json')
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_category_detail_get(self):
request = self.factory.get(self.urls[0], format='json')
force_authenticate(request, self.my_user, self.my_user.auth_token)
response = self.view(request, pk=self.my_category.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': self.my_category.id, 'name': self.my_category.name})
request = self.factory.get(self.urls[1], format='json')
force_authenticate(request, self.my_user, self.my_user.auth_token)
response = self.view(request, pk=self.other_category.id)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_category_detail_put(self):
data = {'id': 100, 'user': 100, 'name': "New category name"}
request = self.factory.put(self.urls[0], data, format='json')
force_authenticate(request, self.my_user, self.my_user.auth_token)
response = self.view(request, pk=self.my_category.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': self.my_category.id, 'name': data['name']})
self.my_category = Category.objects.get(id=self.my_category.id)
self.assertEqual(self.my_category.name, data['name'])
self.assertEqual(self.my_category.user.id, self.my_user.id)
request = self.factory.put(self.urls[1], data, format='json')
force_authenticate(request, self.my_user, self.my_user.auth_token)
response = self.view(request, pk=self.other_category.id)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.other_category = Category.objects.get(id=self.other_category.id)
self.assertNotEqual(self.other_category.name, data['name'])
self.assertEqual(self.other_category.user.id, self.other_user.id) | apache-2.0 | -2,008,331,970,430,733,300 | 50.529801 | 112 | 0.679306 | false |
WorldViews/Spirals | scripts/filterLayer.py | 1 | 1154 |
import json
#from LandOrSea import overLand
from LandOrSeaBaseline import overLand
def filterByLand(path, opath):
obj = json.load(file(path))
recs = obj['records']
landRecs = []
n = 0
nLand = 0
for rec in recs:
n += 1
lat = rec['lat']
lon = rec['lon']
if overLand(lat,lon):
nLand += 1
landRecs.append(rec)
print nLand, n
obj['records'] = landRecs
json.dump(obj, file(opath, "w"), indent=4)
print "Num Recs: %d over land: %d\n" % (n, nLand)
#filterByLand("../Viewer/data/dancing_data.json", "../Viewer/data/dance_data.json")
#filterByLand("../Viewer/data/temples0_data.json", "../Viewer/data/temples_data.json")
#filterByLand("../Viewer/data/climbing0_data.json", "../Viewer/data/climbing_data.json")
#filterByLand("../Viewer/data/temples0_data.json", "../Viewer/data/temples_data.json")
#filterByLand("../Viewer/data/hiking0_data.json", "../Viewer/data/hiking_data.json")
filterByLand("../Viewer/data/gardens0_data.json", "../Viewer/data/gardens_data.json")
filterByLand("../Viewer/data/surfing0_data.json", "../Viewer/data/surfing_data.json")
| mit | 942,163,728,044,186,400 | 31.971429 | 88 | 0.641248 | false |
joshzarrabi/e-mission-server | emission/analysis/modelling/tour_model/representatives.py | 1 | 8413 | # standard imports
import numpy
import math
import copy
# our imports
from emission.core.wrapper.trip_old import Trip, Coordinate
import emission.storage.decorations.trip_queries as esdtq
import emission.storage.decorations.place_queries as esdpq
"""
This class creates a group of representatives for each cluster
and defines the locations that the user visits from those clusters.
The purpose of this class is to get the list of clusters with
start and end points to create the tour graph.
To use this class, as input it takes
- data: A list of trip objects
- labels: A list of integers that define the clusters on the data.
The labels are calculated in cluster pipeline from the clusters. The labels
should be a list of integers of the same length as the list of data, where
different numbers indicate different clusters.
"""
class representatives:
def __init__(self, data, labels, old=True):
self.data = data
self.is_old = old
if not self.data:
self.data = []
self.labels = labels
if not self.labels:
self.labels = []
if len(self.data) != len(self.labels):
raise ValueError('Length of data must equal length of clustering labels.')
self.num_clusters = len(set(self.labels))
self.size = len(self.data)
#get the list of clusters based on the labels
def list_clusters(self):
if not self.data:
self.clusters = []
return
self.clusters = [0] * self.num_clusters
for i in range(self.num_clusters):
self.clusters[i] = []
for i in range(len(self.labels)):
a = self.labels[i]
self.clusters[a].append(self.data[i])
#get the representatives for each cluster
def get_reps(self):
self.reps = []
if not self.data:
return
for cluster in self.clusters:
points = [[], [], [], []]
for c in cluster:
if self.is_old:
points[0].append(c.trip_start_location.lat)
points[1].append(c.trip_start_location.lon)
points[2].append(c.trip_end_location.lat)
points[3].append(c.trip_end_location.lon)
else:
# We want (lat, lon) to be consistent with old above.
# But in the new, our data is in geojson so it is (lon, lat).
# Fix it by flipping the order of the indices
points[0].append(c.start_loc["coordinates"][1])
points[1].append(c.start_loc["coordinates"][0])
points[2].append(c.end_loc["coordinates"][1])
points[3].append(c.end_loc["coordinates"][0])
centers = numpy.mean(points, axis=1)
a = Trip(None, None, None, None, None, None, Coordinate(centers[0], centers[1]), Coordinate(centers[2], centers[3]))
self.reps.append(a)
#map the representatives
def map(self):
import pygmaps
mymap = pygmaps.maps(37.5, -122.32, 10)
for t in self.reps:
start_lat = t.trip_start_location.lat
start_lon = t.trip_start_location.lon
end_lat = t.trip_end_location.lat
end_lon = t.trip_end_location.lon
path = [(start_lat, start_lon), (end_lat, end_lon)]
mymap.addpath(path)
for l in self.locs:
mymap.addpoint(l.lat, l.lon, '#0000FF')
mymap.draw('./myreps.html')
#define the set of locations for the data
def locations(self):
self.bins = []
self.locs = []
if not self.data:
self.num_locations = 0
return
for a in range(self.num_clusters):
added_start = False
added_end = False
for bin in self.bins:
if self.match('start', a, bin) and not added_start:
bin.append(('start', a))
added_start = True
if self.match('end', a, bin) and not added_end:
bin.append(('end', a))
added_end = True
if not added_start:
newbin = [('start', a)]
if self.match('end', a, newbin) and not added_end:
newbin.append(('end', a))
added_end = True
self.bins.append(newbin)
if not added_end:
self.bins.append([('end', a)])
self.num_locations = len(self.bins)
self.locs = []
for bin in self.bins:
locs = []
for b in bin:
if b[0] == 'start':
point = self.reps[b[1]].trip_start_location
if b[0] == 'end':
point = self.reps[b[1]].trip_end_location
locs.append([point.lat, point.lon])
locs = numpy.mean(locs, axis=0)
coord = Coordinate(locs[0], locs[1])
self.locs.append(coord)
#create the input to the tour graph
def cluster_dict(self):
self.tour_dict = [0] * self.num_clusters
if not self.data:
self.tour_dict = []
self.self_loops_tour_dict = []
return
for i in range(self.num_clusters):
a = {'sections' : self.clusters[i]}
self.tour_dict[i] = a
for i in range(self.num_clusters):
start_places = []
end_places = []
for t in self.tour_dict[i]["sections"]:
start = esdpq.get_place(t.start_place)
end = esdpq.get_place(t.end_place)
start_places.append(start)
end_places.append(end)
self.tour_dict[i]["start_places"] = start_places
self.tour_dict[i]["end_places"] = end_places
for i in range(self.num_locations):
bin = self.bins[i]
for b in bin:
cluster = b[1]
label = b[0]
self.tour_dict[cluster][label] = i
for i in range(self.num_clusters):
cluster = self.tour_dict[i]
start_coords = self.locs[cluster['start']]
end_coords = self.locs[cluster['end']]
self.tour_dict[i]['start_coords'] = start_coords
self.tour_dict[i]['end_coords'] = end_coords
self.self_loops_tour_dict = copy.deepcopy(self.tour_dict)
for i in range(len(self.tour_dict)-1, -1, -1):
cluster = self.tour_dict[i]
if cluster['start'] == cluster['end'] and len(self.tour_dict) > 1:
self.tour_dict.remove(cluster)
newlocs = []
for cluster in self.tour_dict:
if cluster['start'] not in newlocs:
newlocs.append(cluster['start'])
if cluster['end'] not in newlocs:
newlocs.append(cluster['end'])
for i in range(len(self.tour_dict)):
self.tour_dict[i]['start'] = newlocs.index(self.tour_dict[i]['start'])
self.tour_dict[i]['end'] = newlocs.index(self.tour_dict[i]['end'])
#check whether a point is close to all points in a bin
def match(self, label, a, bin):
if label == 'start':
pointa = self.reps[a].trip_start_location
elif label == 'end':
pointa = self.reps[a].trip_end_location
for b in bin:
if b[0] == 'start':
pointb = self.reps[b[1]].trip_start_location
elif b[0] == 'end':
pointb = self.reps[b[1]].trip_end_location
if self.distance(pointa.lat, pointa.lon, pointb.lat, pointb.lon) > 300:
return False
return True
#the meter distance between two points
def distance(self, lat1, lon1, lat2, lon2):
R = 6371000
rlat1 = math.radians(lat1)
rlat2 = math.radians(lat2)
        lon = math.radians(lon2 - lon1)
        lat = math.radians(lat2 - lat1)
a = math.sin(lat/2.0)**2 + math.cos(rlat1)*math.cos(rlat2) * math.sin(lon/2.0)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = R * c
return d
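# Minimal usage sketch (illustrative only, not part of the original module):
# the trips below are synthetic old-style Trip objects with made-up
# coordinates, both assigned to cluster 0 by the fake label list.
if __name__ == '__main__':
    fake_trips = [Trip(None, None, None, None, None, None,
                       Coordinate(37.774, -122.419), Coordinate(37.871, -122.273)),
                  Trip(None, None, None, None, None, None,
                       Coordinate(37.775, -122.418), Coordinate(37.872, -122.272))]
    reps = representatives(fake_trips, [0, 0], old=True)
    reps.list_clusters()
    reps.get_reps()
    reps.locations()
    print("%d cluster(s) grouped into %d location(s)" %
          (reps.num_clusters, reps.num_locations))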
| bsd-3-clause | -1,976,690,382,363,518,000 | 37.313084 | 128 | 0.523476 | false |
shhui/nova | nova/db/sqlalchemy/api.py | 1 | 220845 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import sys
import time
import uuid
from oslo.config import cfg
import six
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import DataError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.schema import Table
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import select
from sqlalchemy.sql import func
from sqlalchemy import String
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import quota
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
connection_opts = [
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(connection_opts, group='database')
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('connection',
'nova.openstack.common.db.options',
group='database')
LOG = logging.getLogger(__name__)
_MASTER_FACADE = None
_SLAVE_FACADE = None
def _create_facade_lazily(use_slave=False):
global _MASTER_FACADE
global _SLAVE_FACADE
return_slave = use_slave and CONF.database.slave_connection
if not return_slave:
if _MASTER_FACADE is None:
_MASTER_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database.iteritems())
)
return _MASTER_FACADE
else:
if _SLAVE_FACADE is None:
_SLAVE_FACADE = db_session.EngineFacade(
CONF.database.slave_connection,
**dict(CONF.database.iteritems())
)
return _SLAVE_FACADE
def get_engine(use_slave=False):
facade = _create_facade_lazily(use_slave)
return facade.get_engine()
def get_session(use_slave=False, **kwargs):
facade = _create_facade_lazily(use_slave)
return facade.get_session(**kwargs)
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_admin_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warn(_("Deadlock detected when running "
"'%(func_name)s': Retrying..."),
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param use_slave: If true, use slave_connection
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id. If set to 'allow_none',
restriction includes project_id = None.
    :param base_model: When model_query is passed a "model" parameter that is
            not a subclass of NovaBase, an extra base_model parameter must be
            passed that is a subclass of NovaBase and corresponds to the
            model parameter.
"""
use_slave = kwargs.get('use_slave') or False
if CONF.database.slave_connection == '':
use_slave = False
session = kwargs.get('session') or get_session(use_slave=use_slave)
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only', False)
def issubclassof_nova_base(obj):
return isinstance(obj, type) and issubclass(obj, models.NovaBase)
base_model = model
if not issubclassof_nova_base(base_model):
base_model = kwargs.get('base_model', None)
if not issubclassof_nova_base(base_model):
raise Exception(_("model or base_model parameter should be "
"subclass of NovaBase"))
query = session.query(model, *args)
default_deleted_value = base_model.__mapper__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(base_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(base_model.deleted != default_deleted_value)
else:
raise Exception(_("Unrecognized read_deleted value '%s'")
% read_deleted)
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(base_model.project_id == context.project_id,
base_model.project_id == None))
else:
query = query.filter_by(project_id=context.project_id)
return query
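# Illustrative usage (not part of the original code): callers typically build
# on model_query like this, with `instance_uuid` standing in for a real value:
#
#     model_query(context, models.Instance, read_deleted="no",
#                 project_only=True).\
#         filter_by(uuid=instance_uuid).\
#         first()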
def exact_filter(query, model, filters, legal_keys):
"""Applies exact match filtering to a query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param model: model object the query applies to, for IN-style
filtering
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key in ('metadata', 'system_metadata'):
column_attr = getattr(model, key)
if isinstance(value, list):
for item in value:
for k, v in item.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
else:
for k, v in value.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
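# Illustrative example (not part of the original code): calling
#
#     exact_filter(query, models.Instance,
#                  {'project_id': 'some-project',
#                   'vm_state': ['active', 'stopped']},
#                  ['project_id', 'vm_state'])
#
# applies the scalar value as an exact filter_by() match, turns the list value
# into an IN clause, and pops both keys from the filters dict as they are
# consumed.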
def convert_objects_related_datetimes(values, *datetime_keys):
for key in datetime_keys:
if key in values and values[key]:
if isinstance(values[key], six.string_types):
values[key] = timeutils.parse_strtime(values[key])
# NOTE(danms): Strip UTC timezones from datetimes, since they're
# stored that way in the database
values[key] = values[key].replace(tzinfo=None)
return values
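# Illustrative example (not part of the original code): given
#
#     values = {'created_at': '2014-01-01T00:00:00.000000', 'deleted_at': None}
#
# convert_objects_related_datetimes(values, 'created_at', 'deleted_at') parses
# the string into a datetime and strips any tzinfo (matching how the column is
# stored), while keys that are missing or None are left untouched.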
def _sync_instances(context, project_id, user_id, session):
return dict(zip(('instances', 'cores', 'ram'),
_instance_data_get_for_user(
context, project_id, user_id, session)))
def _sync_floating_ips(context, project_id, user_id, session):
return dict(floating_ips=_floating_ip_count_by_project(
context, project_id, session))
def _sync_fixed_ips(context, project_id, user_id, session):
return dict(fixed_ips=_fixed_ip_count_by_project(
context, project_id, session))
def _sync_security_groups(context, project_id, user_id, session):
return dict(security_groups=_security_group_count_by_project_and_user(
context, project_id, user_id, session))
QUOTA_SYNC_FUNCTIONS = {
'_sync_instances': _sync_instances,
'_sync_floating_ips': _sync_floating_ips,
'_sync_fixed_ips': _sync_fixed_ips,
'_sync_security_groups': _sync_security_groups,
}
###################
def constraint(**conditions):
return Constraint(conditions)
def equal_any(*values):
return EqualityCondition(values)
def not_equal(*values):
return InequalityCondition(values)
class Constraint(object):
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.iteritems():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
# method signature requires us to return an iterable even if for OR
# operator this will actually be a single clause
return [or_(*[field == value for value in self.values])]
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
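# Illustrative example (not part of the original code): a caller can build
#
#     constraint(task_state=equal_any(None), vm_state=not_equal('error'))
#
# and pass it to an update/delete helper, which applies it to its query via
# Constraint.apply() so the operation only matches rows that still satisfy
# those conditions.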
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
count = model_query(context, models.Service, session=session).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.ServiceNotFound(service_id=service_id)
model_query(context, models.ComputeNode, session=session).\
filter_by(service_id=service_id).\
soft_delete(synchronize_session=False)
def _service_get(context, service_id, with_compute_node=True, session=None):
query = model_query(context, models.Service, session=session).\
filter_by(id=service_id)
if with_compute_node:
query = query.options(joinedload('compute_node'))
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get(context, service_id):
return _service_get(context, service_id)
@require_admin_context
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@require_admin_context
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@require_admin_context
def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic=CONF.compute_topic).\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
service_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return service_ref
@require_admin_context
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id,
with_compute_node=False, session=session)
service_ref.update(values)
return service_ref
###################
def compute_node_get(context, compute_id):
return _compute_node_get(context, compute_id)
def _compute_node_get(context, compute_id, session=None):
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
options(joinedload('service')).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@require_admin_context
def compute_node_get_by_service_id(context, service_id):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(service_id=service_id).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def compute_node_get_all(context, no_date_fields):
# NOTE(msdubov): Using lower-level 'select' queries and joining the tables
# manually here allows to gain 3x speed-up and to have 5x
# less network load / memory usage compared to the sqla ORM.
engine = get_engine()
# Retrieve ComputeNode, Service
compute_node = models.ComputeNode.__table__
service = models.Service.__table__
with engine.begin() as conn:
redundant_columns = set(['deleted_at', 'created_at', 'updated_at',
'deleted']) if no_date_fields else set([])
def filter_columns(table):
return [c for c in table.c if c.name not in redundant_columns]
compute_node_query = select(filter_columns(compute_node)).\
where(compute_node.c.deleted == 0).\
order_by(compute_node.c.service_id)
compute_node_rows = conn.execute(compute_node_query).fetchall()
service_query = select(filter_columns(service)).\
where((service.c.deleted == 0) &
(service.c.binary == 'nova-compute')).\
order_by(service.c.id)
service_rows = conn.execute(service_query).fetchall()
# Join ComputeNode & Service manually.
services = {}
for proxy in service_rows:
services[proxy['id']] = dict(proxy.items())
compute_nodes = []
for proxy in compute_node_rows:
node = dict(proxy.items())
node['service'] = services.get(proxy['service_id'])
compute_nodes.append(node)
return compute_nodes
@require_admin_context
def compute_node_search_by_hypervisor(context, hypervisor_match):
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
options(joinedload('service')).\
filter(field.like('%%%s%%' % hypervisor_match)).\
all()
@require_admin_context
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
"""
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save()
return compute_node_ref
@require_admin_context
@_retry_on_deadlock
def compute_node_update(context, compute_id, values):
"""Updates the ComputeNode record with the most recent data."""
session = get_session()
with session.begin():
compute_ref = _compute_node_get(context, compute_id, session=session)
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
values['updated_at'] = timeutils.utcnow()
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_ref.update(values)
return compute_ref
@require_admin_context
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record."""
session = get_session()
with session.begin():
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
soft_delete(synchronize_session=False)
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
def compute_node_statistics(context):
"""Compute statistics over all compute nodes."""
result = model_query(context,
func.count(models.ComputeNode.id),
func.sum(models.ComputeNode.vcpus),
func.sum(models.ComputeNode.memory_mb),
func.sum(models.ComputeNode.local_gb),
func.sum(models.ComputeNode.vcpus_used),
func.sum(models.ComputeNode.memory_mb_used),
func.sum(models.ComputeNode.local_gb_used),
func.sum(models.ComputeNode.free_ram_mb),
func.sum(models.ComputeNode.free_disk_gb),
func.sum(models.ComputeNode.current_workload),
func.sum(models.ComputeNode.running_vms),
func.sum(models.ComputeNode.disk_available_least),
base_model=models.ComputeNode,
read_deleted="no").first()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
return dict((field, int(result[idx] or 0))
for idx, field in enumerate(fields))
###################
@require_admin_context
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save()
return certificate_ref
@require_admin_context
def certificate_get_all_by_project(context, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_admin_context
def certificate_get_all_by_user(context, user_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@require_admin_context
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
###################
@require_context
def floating_ip_get(context, id):
try:
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(id=id)
except DataError:
msg = _("Invalid floating ip id %s in request") % id
LOG.warn(msg)
raise exception.InvalidID(id=id)
return result
@require_context
def floating_ip_get_pools(context):
pools = []
for result in model_query(context, models.FloatingIp.pool,
base_model=models.FloatingIp).distinct():
pools.append({'name': result[0]})
return pools
@require_context
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
nova.context.authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = model_query(context, models.FloatingIp,
session=session, read_deleted="no").\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not floating_ip_ref:
raise exception.NoMoreFloatingIps()
floating_ip_ref['project_id'] = project_id
floating_ip_ref['auto_assigned'] = auto_assigned
session.add(floating_ip_ref)
return floating_ip_ref['address']
@require_context
def floating_ip_bulk_create(context, ips):
session = get_session()
with session.begin():
for ip in ips:
model = models.FloatingIp()
model.update(ip)
try:
# NOTE(boris-42): To get existing address we have to do each
# time session.flush()..
session.add(model)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=ip['address'])
def _ip_range_splitter(ips, block_size=256):
"""Yields blocks of IPs no more than block_size elements long."""
out = []
count = 0
for ip in ips:
out.append(ip['address'])
count += 1
if count > block_size - 1:
yield out
out = []
count = 0
if out:
yield out
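# Illustrative example (not part of the original code): splitting 600 fixed or
# floating IP dicts with the default block_size of 256 yields three lists of
# plain address strings (256, 256 and 88 long), which keeps the IN clauses
# built by the bulk helpers below to a manageable size.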
@require_context
def floating_ip_bulk_destroy(context, ips):
session = get_session()
with session.begin():
project_id_to_quota_count = collections.defaultdict(int)
for ip_block in _ip_range_splitter(ips):
# Find any floating IPs that were not auto_assigned and
# thus need quota released.
query = model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
filter_by(auto_assigned=False)
rows = query.all()
for row in rows:
# The count is negative since we release quota by
# reserving negative quota.
project_id_to_quota_count[row['project_id']] -= 1
# Delete the floating IPs.
model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
soft_delete(synchronize_session='fetch')
# Delete the quotas, if needed.
for project_id, count in project_id_to_quota_count.iteritems():
try:
reservations = quota.QUOTAS.reserve(context,
project_id=project_id,
floating_ips=count)
quota.QUOTAS.commit(context,
reservations,
project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update usages bulk "
"deallocating floating IP"))
@require_context
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
try:
floating_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return floating_ip_ref
def _floating_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@require_context
@_retry_on_deadlock
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
session = get_session()
with session.begin():
floating_ip_ref = _floating_ip_get_by_address(context,
floating_address,
session=session)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(address=fixed_address).\
options(joinedload('network')).\
first()
if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]:
return None
floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
floating_ip_ref.host = host
return fixed_ip_ref
@require_context
def floating_ip_deallocate(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
update({'project_id': None,
'host': None,
'auto_assigned': False})
@require_context
def floating_ip_destroy(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
delete()
@require_context
def floating_ip_disassociate(context, address):
session = get_session()
with session.begin():
floating_ip_ref = model_query(context,
models.FloatingIp,
session=session).\
filter_by(address=address).\
first()
if not floating_ip_ref:
raise exception.FloatingIpNotFoundForAddress(address=address)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(id=floating_ip_ref['fixed_ip_id']).\
options(joinedload('network')).\
first()
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
return fixed_ip_ref
@require_context
def floating_ip_set_auto_assigned(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
update({'auto_assigned': True})
def _floating_ip_get_all(context, session=None):
return model_query(context, models.FloatingIp, read_deleted="no",
session=session)
@require_admin_context
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).all()
if not floating_ip_refs:
raise exception.NoFloatingIpsDefined()
return floating_ip_refs
@require_admin_context
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
all()
if not floating_ip_refs:
raise exception.FloatingIpNotFoundForHost(host=host)
return floating_ip_refs
@require_context
def floating_ip_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
options(joinedload_all('fixed_ip.instance')).\
all()
@require_context
def floating_ip_get_by_address(context, address):
return _floating_ip_get_by_address(context, address)
def _floating_ip_get_by_address(context, address, session=None):
# if address string is empty explicitly set it to None
if not address:
address = None
try:
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid floating IP %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
if result.project_id and nova.context.is_user_context(context):
nova.context.authorize_project_context(context, result.project_id)
return result
@require_context
def floating_ip_get_by_fixed_address(context, fixed_address):
return model_query(context, models.FloatingIp).\
outerjoin(models.FixedIp,
models.FixedIp.id ==
models.FloatingIp.fixed_ip_id).\
filter(models.FixedIp.address == fixed_address).\
all()
@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return model_query(context, models.FloatingIp).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
@require_context
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
float_ip_ref = _floating_ip_get_by_address(context, address, session)
float_ip_ref.update(values)
try:
float_ip_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
def _dnsdomain_get(context, session, fqdomain):
return model_query(context, models.DNSDomain,
session=session, read_deleted="no").\
filter_by(domain=fqdomain).\
with_lockmode('update').\
first()
@require_context
def dnsdomain_get(context, fqdomain):
session = get_session()
with session.begin():
return _dnsdomain_get(context, session, fqdomain)
def _dnsdomain_get_or_create(context, session, fqdomain):
domain_ref = _dnsdomain_get(context, session, fqdomain)
if not domain_ref:
dns_ref = models.DNSDomain()
dns_ref.update({'domain': fqdomain,
'availability_zone': None,
'project_id': None})
return dns_ref
return domain_ref
@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'private'
domain_ref.availability_zone = zone
session.add(domain_ref)
@require_admin_context
def dnsdomain_register_for_project(context, fqdomain, project):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'public'
domain_ref.project_id = project
session.add(domain_ref)
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
model_query(context, models.DNSDomain).\
filter_by(domain=fqdomain).\
delete()
@require_context
def dnsdomain_list(context):
query = model_query(context, models.DNSDomain, read_deleted="no")
return [row.domain for row in query.all()]
def dnsdomain_get_all(context):
return model_query(context, models.DNSDomain, read_deleted="no").all()
###################
@require_admin_context
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Keyword arguments:
    reserved -- should be a boolean value (True or False); the exact value
                will be used to filter on the fixed IP address
"""
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_id)
if fixed_ip_ref.instance_uuid:
raise exception.FixedIpAlreadyInUse(address=address,
instance_uuid=instance_uuid)
if not fixed_ip_ref.network_id:
fixed_ip_ref.network_id = network_id
fixed_ip_ref.instance_uuid = instance_uuid
session.add(fixed_ip_ref)
return fixed_ip_ref
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
raise exception.NoMoreFixedIps()
if fixed_ip_ref['network_id'] is None:
            fixed_ip_ref['network_id'] = network_id
if instance_uuid:
fixed_ip_ref['instance_uuid'] = instance_uuid
if host:
fixed_ip_ref['host'] = host
session.add(fixed_ip_ref)
return fixed_ip_ref
@require_context
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
fixed_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
def fixed_ip_bulk_create(context, ips):
session = get_session()
with session.begin():
for ip in ips:
model = models.FixedIp()
model.update(ip)
try:
                # NOTE (vsergeyev): To get the existing address we have to
                #                   call session.flush() each time.
                #                   See related note at line 697.
session.add(model)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=ip['address'])
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update({'instance_uuid': None})
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(context, host, time):
session = get_session()
# NOTE(vish): only update fixed ips that "belong" to this
# host; i.e. the network host or the instance
    #             host matches. Two queries are necessary because
    #             join with update doesn't work.
with session.begin():
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == True),
models.Network.host == host)
result = model_query(context, models.FixedIp.id,
base_model=models.FixedIp, read_deleted="no",
session=session).\
filter(models.FixedIp.allocated == False).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(host_filter).\
all()
fixed_ip_ids = [fip[0] for fip in result]
if not fixed_ip_ids:
return 0
result = model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_uuid': None,
'leased': False,
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@require_context
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
query = query.options(joinedload('network'))
result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context, instance.project_id)
return result
@require_admin_context
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
def fixed_ip_get_by_address(context, address, columns_to_join=None):
return _fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def _fixed_ip_get_by_address(context, address, session=None,
columns_to_join=None):
if session is None:
session = get_session()
if columns_to_join is None:
columns_to_join = []
with session.begin(subtransactions=True):
try:
result = model_query(context, models.FixedIp, session=session)
for column in columns_to_join:
result = result.options(joinedload_all(column))
result = result.filter_by(address=address).first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(
context.elevated(read_deleted='yes'),
result['instance_uuid'],
session
)
nova.context.authorize_project_context(context,
instance.project_id)
return result
@require_admin_context
def fixed_ip_get_by_address_detailed(context, address):
""":returns: a tuple of (models.FixedIp, models.Network, models.Instance)
"""
try:
result = model_query(context, models.FixedIp,
models.Network, models.Instance).\
filter_by(address=address).\
outerjoin((models.Network,
models.Network.id ==
models.FixedIp.network_id)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.FixedIp.instance_uuid)).\
first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
return result
@require_context
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
outerjoin(models.FloatingIp,
models.FloatingIp.fixed_ip_id ==
models.FixedIp.id).\
filter(models.FloatingIp.address == floating_address).\
first()
# NOTE(tr3buchet) please don't invent an exception here, empty list is fine
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
return result
@require_admin_context
def fixed_ip_get_by_host(context, host):
session = get_session()
with session.begin():
instance_uuids = _instance_get_all_uuids_by_host(context, host,
session=session)
if not instance_uuids:
return []
return model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
all()
@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
all()
return result
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update(values)
def _fixed_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp.id,
base_model=models.FixedIp, read_deleted="no",
session=session).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
count()
###################
@require_context
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context, session=None, use_slave=False):
return model_query(context, models.VirtualInterface, session=session,
read_deleted="no", use_slave=use_slave)
@require_context
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
:param address: = the address of the interface you're looking to get
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except DataError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists_using_uuid
def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
"""Gets all virtual interfaces for instance.
:param instance_uuid: = uuid of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
all()
return vif_refs
@require_context
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
with the instance given by instance_id.
:param instance_uuid: = uuid of instance
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _validate_unique_server_name(context, session, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, session=session,
read_deleted=False).\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
' Flag must be empty, "global" or'
' "project"') % CONF.osapi_compute_unique_server_name_scope
LOG.warn(msg)
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
datetime_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
convert_objects_related_datetimes(values, *datetime_keys)
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref.update(values)
def _get_sec_group_models(session, security_groups):
models = []
default_group = security_group_ensure_default(context)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
session, context.project_id, security_groups))
return models
session = get_session()
with session.begin():
if 'hostname' in values:
_validate_unique_server_name(context, session, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(session,
security_groups)
session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
def _instance_data_get_for_user(context, project_id, user_id, session=None):
result = model_query(context,
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb),
base_model=models.Instance,
session=session).\
filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
result = result.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session)
else:
raise exception.InvalidUUID(instance_uuid)
query = model_query(context, models.Instance, session=session).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceInfoCache, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceMetadata, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceFault, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
return instance_ref
@require_context
def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join, use_slave=use_slave)
def _instance_get_by_uuid(context, uuid, session=None,
columns_to_join=None, use_slave=False):
result = _build_instance_get(context, session=session,
columns_to_join=columns_to_join,
use_slave=use_slave).\
filter_by(uuid=uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
def instance_get(context, instance_id, columns_to_join=None):
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
).filter_by(id=instance_id).first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
except DataError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
msg = _("Invalid instance id %s in request") % instance_id
LOG.warn(msg)
raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, session=None,
columns_to_join=None, use_slave=False):
query = model_query(context, models.Instance, session=session,
project_only=True, use_slave=use_slave).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
if column in ['info_cache', 'security_groups']:
# Already always joined above
continue
query = query.options(joinedload(column))
    # NOTE(alaski): Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
return query
def _instances_fill_metadata(context, instances,
manual_joins=None, use_slave=False):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
:param context: security context
:param instances: list of instances to fill
:param manual_joins: list of tables to manually join (can be any
combination of 'metadata' and 'system_metadata' or
None to take the default of both)
"""
uuids = [inst['uuid'] for inst in instances]
if manual_joins is None:
manual_joins = ['metadata', 'system_metadata']
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids,
use_slave=use_slave):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids,
use_slave=use_slave):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
if 'pci_devices' in manual_joins:
for row in _instance_pcidevs_get_multi(context, uuids):
pcidevs[row['instance_uuid']].append(row)
filled_instances = []
for inst in instances:
inst = dict(inst.iteritems())
inst['system_metadata'] = sys_meta[inst['uuid']]
inst['metadata'] = meta[inst['uuid']]
if 'pci_devices' in manual_joins:
inst['pci_devices'] = pcidevs[inst['uuid']]
filled_instances.append(inst)
return filled_instances
def _manual_join_columns(columns_to_join):
manual_joins = []
for column in ('metadata', 'system_metadata', 'pci_devices'):
if column in columns_to_join:
columns_to_join.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join
@require_context
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join = _manual_join_columns(columns_to_join)
query = model_query(context, models.Instance)
for column in columns_to_join:
query = query.options(joinedload(column))
if not context.is_admin:
        # If we're not admin context, add the appropriate filter.
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None,
use_slave=False):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
otherwise.
Depending on the name of a filter, matching for that filter is
performed using either exact matching or as regular expression
matching. Exact matching is applied for the following filters:
['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'system_metadata']
    A third type of filter (also using exact matching) filters
    based on instance metadata tags when supplied under a special
    key named 'filter'.
filters = {
'filter': [
{'name': 'tag-key', 'value': '<metakey>'},
{'name': 'tag-value', 'value': '<metaval>'},
{'name': 'tag:<metakey>', 'value': '<metaval>'}
]
}
    Special keys are used to tweak the query further:
'changes-since' - only return instances updated after
'deleted' - only return (or exclude) deleted instances
'soft_deleted' - modify behavior of 'deleted' to either
include or exclude instances whose
vm_state is SOFT_DELETED.
"""
sort_fn = {'desc': desc, 'asc': asc}
if CONF.database.slave_connection == '':
use_slave = False
session = get_session(use_slave=use_slave)
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join = _manual_join_columns(columns_to_join)
query_prefix = session.query(models.Instance)
for column in columns_to_join:
query_prefix = query_prefix.options(joinedload(column))
query_prefix = query_prefix.order_by(sort_fn[sort_dir](
getattr(models.Instance, sort_key)))
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
if 'changes-since' in filters:
changes_since = timeutils.normalize_time(filters['changes-since'])
query_prefix = query_prefix.\
filter(models.Instance.updated_at >= changes_since)
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
if filters.pop('deleted'):
if filters.pop('soft_deleted', True):
deleted = or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
query_prefix = query_prefix.\
filter(deleted)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
query_prefix = query_prefix.\
filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == None
)
query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters:
if filters.pop('cleaned'):
query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
else:
query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
if not context.is_admin:
        # If we're not admin context, add the appropriate filter.
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state',
'system_metadata']
# Filter the query
query_prefix = exact_filter(query_prefix, models.Instance,
filters, exact_match_filter_names)
query_prefix = regex_filter(query_prefix, models.Instance, filters)
query_prefix = tag_filter(context, query_prefix, models.Instance,
models.InstanceMetadata,
models.InstanceMetadata.instance_uuid,
filters)
# paginate query
if marker is not None:
try:
marker = _instance_get_by_uuid(context, marker, session=session)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
query_prefix = sqlalchemyutils.paginate_query(query_prefix,
models.Instance, limit,
[sort_key, 'created_at', 'id'],
marker=marker,
sort_dir=sort_dir)
return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
def tag_filter(context, query, model, model_metadata,
model_uuid, filters):
"""Applies tag filtering to a query.
    Returns the updated query. This method alters the filters dict to
    remove keys that are tags. It filters resources by tags; it assumes
    that the caller will take care of access control.
:param query: query to apply filters to
:param model: model object the query applies to
:param filters: dictionary of filters
"""
if filters.get('filter') is None:
return query
or_query = None
def _to_list(val):
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
return val
for filter_block in filters['filter']:
if not isinstance(filter_block, dict):
continue
filter_name = filter_block.get('name')
if filter_name is None:
continue
tag_name = filter_name[4:]
tag_val = _to_list(filter_block.get('value'))
if filter_name.startswith('tag-'):
if tag_name not in ['key', 'value']:
msg = _("Invalid field name: %s") % tag_name
raise exception.InvalidParameterValue(err=msg)
subq = getattr(model_metadata, tag_name).in_(tag_val)
or_query = subq if or_query is None else or_(or_query, subq)
elif filter_name.startswith('tag:'):
subq = model_query(context, model_uuid,
session=query.session, base_model=model_metadata).\
filter_by(key=tag_name).\
filter(model_metadata.value.in_(tag_val))
query = query.filter(model.uuid.in_(subq))
if or_query is not None:
subq = model_query(context, model_uuid,
session=query.session, base_model=model_metadata).\
filter(or_query)
query = query.filter(model.uuid.in_(subq))
return query
def regex_filter(query, model, filters):
"""Applies regular expression filtering to a query.
Returns the updated query.
:param query: query to apply filters to
:param model: model object the query applies to
:param filters: dictionary of filters with regex values
"""
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP',
'sqlite': 'REGEXP'
}
db_string = CONF.database.connection.split(':')[0].split('+')[0]
db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
for filter_name in filters.iterkeys():
try:
column_attr = getattr(model, filter_name)
except AttributeError:
continue
if 'property' == type(column_attr).__name__:
continue
if db_regexp_op == 'LIKE':
query = query.filter(column_attr.op(db_regexp_op)(
'%' + str(filters[filter_name]) + '%'))
else:
query = query.filter(column_attr.op(db_regexp_op)(
str(filters[filter_name])))
return query
@require_context
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Return instances and joins that were active during window."""
session = get_session()
query = session.query(models.Instance)
query = query.options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
filter(or_(models.Instance.terminated_at == None,
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
if host:
query = query.filter_by(host=host)
return _instances_fill_metadata(context, query.all())
def _instance_get_all_query(context, project_only=False,
joins=None, use_slave=False):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
project_only=project_only,
use_slave=use_slave)
for join in joins:
query = query.options(joinedload(join))
return query
@require_admin_context
def instance_get_all_by_host(context, host,
columns_to_join=None,
use_slave=False):
return _instances_fill_metadata(context,
_instance_get_all_query(context,
use_slave=use_slave).filter_by(host=host).all(),
manual_joins=columns_to_join,
use_slave=use_slave)
def _instance_get_all_uuids_by_host(context, host, session=None):
"""Return a list of the instance uuids on a given host.
Returns a list of UUIDs, not Instance model objects. This internal version
allows you to specify a session object as a kwarg.
"""
uuids = []
for tuple in model_query(context, models.Instance.uuid, read_deleted="no",
base_model=models.Instance, session=session).\
filter_by(host=host).\
all():
uuids.append(tuple[0])
return uuids
@require_admin_context
def instance_get_all_by_host_and_node(context, host, node):
return _instances_fill_metadata(context,
_instance_get_all_query(context, joins=[]).filter_by(host=host).
filter_by(node=node).all(), manual_joins=[])
@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).
filter(models.Instance.instance_type_id != type_id).all())
# NOTE(jkoelker) This is only being left here for compat with floating
#                ips. Currently the network_api doesn't return floaters
#                in network_info. Once it starts returning the model, this
#                function and its call in compute/manager.py on 1829 can
#                go away.
@require_context
def instance_get_floating_address(context, instance_id):
instance = instance_get(context, instance_id)
fixed_ips = fixed_ip_get_by_instance(context, instance['uuid'])
if not fixed_ips:
return None
    # NOTE(tr3buchet): this only gets the first fixed_ip; it won't find
    #                  floating ips associated with other fixed_ips
floating_ips = floating_ip_get_by_fixed_address(context,
fixed_ips[0]['address'])
if not floating_ips:
return None
# NOTE(vish): this just returns the first floating ip
return floating_ips[0]['address']
@require_context
def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
fixed_ip_ids = model_query(context, models.FixedIp.id,
base_model=models.FixedIp).\
filter_by(instance_uuid=instance_uuid).\
all()
if not fixed_ip_ids:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
fixed_ip_ids = [fixed_ip_id.id for fixed_ip_id in fixed_ip_ids]
floating_ips = model_query(context, models.FloatingIp.address,
base_model=models.FloatingIp).\
filter(models.FloatingIp.fixed_ip_id.in_(fixed_ip_ids)).\
all()
return [floating_ip.address for floating_ip in floating_ips]
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
return _instances_fill_metadata(context,
model_query(context, models.Instance).
filter(models.Instance.updated_at <= reboot_window).
filter_by(task_state=task_states.REBOOTING).all(),
manual_joins=[])
@require_context
def instance_update(context, instance_uuid, values):
instance_ref = _instance_update(context, instance_uuid, values)[1]
return instance_ref
@require_context
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance uuid
:param values: = dict containing column values
If "expected_task_state" exists in values, the update can only happen
when the task state before update matches expected_task_state. Otherwise
a UnexpectedTaskStateError is thrown.
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
return _instance_update(context, instance_uuid, values,
copy_old_instance=True,
columns_to_join=columns_to_join)
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
metadata, session):
metadata = dict(metadata)
to_delete = []
for keyvalue in instance[metadata_type]:
key = keyvalue['key']
if key in metadata:
keyvalue['value'] = metadata.pop(key)
elif key not in metadata:
to_delete.append(keyvalue)
for condemned in to_delete:
condemned.soft_delete(session=session)
for key, value in metadata.iteritems():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
session.add(newitem)
instance[metadata_type].append(newitem)
def _instance_update(context, instance_uuid, values, copy_old_instance=False,
columns_to_join=None):
session = get_session()
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
with session.begin():
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session,
columns_to_join=columns_to_join)
if "expected_task_state" in values:
# it is not a db column so always pop out
expected = values.pop("expected_task_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["task_state"]
if actual_state not in expected:
if actual_state == task_states.DELETING:
raise exception.UnexpectedDeletingTaskStateError(
actual=actual_state, expected=expected)
else:
raise exception.UnexpectedTaskStateError(
actual=actual_state, expected=expected)
if "expected_vm_state" in values:
expected = values.pop("expected_vm_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["vm_state"]
if actual_state not in expected:
raise exception.UnexpectedVMStateError(actual=actual_state,
expected=expected)
instance_hostname = instance_ref['hostname'] or ''
if ("hostname" in values and
values["hostname"].lower() != instance_hostname.lower()):
_validate_unique_server_name(context,
session,
values['hostname'])
if copy_old_instance:
old_instance_ref = copy.copy(instance_ref)
else:
old_instance_ref = None
metadata = values.get('metadata')
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
values.pop('metadata'),
session)
system_metadata = values.get('system_metadata')
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
values.pop('system_metadata'),
session)
_handle_objects_related_type_conversions(values)
instance_ref.update(values)
session.add(instance_ref)
return (old_instance_ref, instance_ref)
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
sec_group_ref.save()
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
soft_delete()
###################
@require_context
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
:param session: = optional session object
"""
return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
:param session: = optional session object
"""
session = get_session()
with session.begin():
info_cache = model_query(context, models.InstanceInfoCache,
session=session).\
filter_by(instance_uuid=instance_uuid).\
first()
if info_cache and info_cache['deleted']:
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance_uuid)
elif not info_cache:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry, re-create it.
info_cache = models.InstanceInfoCache()
values['instance_uuid'] = instance_uuid
try:
info_cache.update(values)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to
# recreate the instance cache entry at the same time. First one
# wins.
pass
return info_cache
@require_context
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
:param session: = optional session object
"""
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
###################
@require_context
def key_pair_create(context, values):
try:
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save()
return key_pair_ref
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
@require_context
def key_pair_destroy(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
soft_delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
@require_context
def key_pair_get(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
def key_pair_get_all_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
def key_pair_count_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
###################
@require_admin_context
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a project with a network.
    Called by project_get_networks under certain conditions
    and by the network manager's add_network_to_project().
    Only associate if the project doesn't already have a network,
    or if force is True.
    force solves a race condition where a fresh project has multiple instance
    builds simultaneously picked up by multiple network hosts which attempt
    to associate the project with multiple networks.
    force should only be used as a direct consequence of a user request;
    automated requests should not use force.
"""
session = get_session()
with session.begin():
def network_query(project_filter, id=None):
filter_kwargs = {'project_id': project_filter}
if id is not None:
filter_kwargs['id'] = id
return model_query(context, models.Network, session=session,
read_deleted="no").\
filter_by(**filter_kwargs).\
with_lockmode('update').\
first()
if not force:
# find out if project has a network
network_ref = network_query(project_id)
if force or not network_ref:
# in force mode or project doesn't have a network so associate
# with a new network
# get new network
network_ref = network_query(None, network_id)
if not network_ref:
raise exception.NoMoreNetworks()
# associate with network
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
network_ref['project_id'] = project_id
session.add(network_ref)
return network_ref
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@require_admin_context
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
@require_admin_context
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
network_ref.save()
return network_ref
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
@require_admin_context
def network_delete_safe(context, network_id):
session = get_session()
with session.begin():
result = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(allocated=True).\
count()
if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = _network_get(context, network_id=network_id,
session=session)
model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
soft_delete()
session.delete(network_ref)
@require_admin_context
def network_disassociate(context, network_id, disassociate_host,
disassociate_project):
net_update = {}
if disassociate_project:
net_update['project_id'] = None
if disassociate_host:
net_update['host'] = None
network_update(context, network_id, net_update)
def _network_get(context, network_id, session=None, project_only='allow_none'):
result = model_query(context, models.Network, session=session,
project_only=project_only).\
filter_by(id=network_id).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_context
def network_get(context, network_id, project_only='allow_none'):
return _network_get(context, network_id, project_only=project_only)
@require_context
def network_get_all(context, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).all()
if not result:
raise exception.NoNetworksFound()
return result
@require_context
def network_get_all_by_uuids(context, network_uuids, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).\
filter(models.Network.uuid.in_(network_uuids)).\
all()
if not result:
raise exception.NoNetworksFound()
    # Check if the result contains all the networks we are looking for.
for network_uuid in network_uuids:
found = False
for network in result:
if network['uuid'] == network_uuid:
found = True
break
if not found:
if project_only:
raise exception.NetworkNotFoundForProject(
network_uuid=network_uuid, project_id=context.project_id)
raise exception.NetworkNotFound(network_id=network_uuid)
return result
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
# pylint: disable=C0103
@require_admin_context
def network_get_associated_fixed_ips(context, network_id, host=None):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
# NOTE(vish): The ugly joins here are to solve a performance issue and
# should be removed once we can add and remove leases
# without regenerating the whole list
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
models.Instance.deleted == 0)
session = get_session()
query = session.query(models.FixedIp.address,
models.FixedIp.instance_uuid,
models.FixedIp.network_id,
models.FixedIp.virtual_interface_id,
models.VirtualInterface.address,
models.Instance.hostname,
models.Instance.updated_at,
models.Instance.created_at,
models.FixedIp.allocated,
models.FixedIp.leased).\
filter(models.FixedIp.deleted == 0).\
filter(models.FixedIp.network_id == network_id).\
filter(models.FixedIp.allocated == True).\
join((models.VirtualInterface, vif_and)).\
join((models.Instance, inst_and)).\
filter(models.FixedIp.instance_uuid != None).\
filter(models.FixedIp.virtual_interface_id != None)
if host:
query = query.filter(models.Instance.host == host)
result = query.all()
data = []
for datum in result:
cleaned = {}
cleaned['address'] = datum[0]
cleaned['instance_uuid'] = datum[1]
cleaned['network_id'] = datum[2]
cleaned['vif_id'] = datum[3]
cleaned['vif_address'] = datum[4]
cleaned['instance_hostname'] = datum[5]
cleaned['instance_updated'] = datum[6]
cleaned['instance_created'] = datum[7]
cleaned['allocated'] = datum[8]
cleaned['leased'] = datum[9]
data.append(cleaned)
return data
def network_in_use_on_host(context, network_id, host):
fixed_ips = network_get_associated_fixed_ips(context, network_id, host)
return len(fixed_ips) > 0
def _network_get_query(context, session=None):
return model_query(context, models.Network, session=session,
read_deleted="no")
@require_admin_context
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
if not result:
raise exception.NetworkNotFoundForUUID(uuid=uuid)
return result
@require_admin_context
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
models.Network.cidr_v6 == cidr)).\
first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@require_admin_context
def network_get_all_by_host(context, host):
session = get_session()
fixed_host_filter = or_(models.FixedIp.host == host,
models.Instance.host == host)
fixed_ip_query = model_query(context, models.FixedIp.network_id,
base_model=models.FixedIp,
session=session).\
outerjoin((models.VirtualInterface,
models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.VirtualInterface.instance_uuid)).\
filter(fixed_host_filter)
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
# or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
return _network_get_query(context, session=session).\
filter(host_filter).\
all()
@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
network_ref = _network_get_query(context, session=session).\
filter_by(id=network_id).\
with_lockmode('update').\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not network_ref['host']:
network_ref['host'] = host_id
session.add(network_ref)
return network_ref['host']
@require_context
def network_update(context, network_id, values):
session = get_session()
with session.begin():
network_ref = _network_get(context, network_id, session=session)
network_ref.update(values)
try:
network_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
###################
@require_context
def quota_get(context, project_id, resource, user_id=None):
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get_all_by_project_and_user(context, project_id, user_id):
nova.context.authorize_project_context(context, project_id)
user_quotas = model_query(context, models.ProjectUserQuota.resource,
models.ProjectUserQuota.hard_limit,
base_model=models.ProjectUserQuota).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for quota in user_quotas:
result[quota.resource] = quota.hard_limit
return result
@require_context
def quota_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_get_all(context, project_id):
nova.context.authorize_project_context(context, project_id)
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
return result
@require_admin_context
def quota_create(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
@require_admin_context
def quota_update(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
def quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
nova.context.authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
quota_class_ref.save()
return quota_class_ref
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
update({'hard_limit': limit})
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
def quota_usage_get(context, project_id, resource, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
if resource not in PER_PROJECT_QUOTAS:
result = query.filter_by(user_id=user_id).first()
else:
result = query.filter_by(user_id=None).first()
else:
result = query.first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
def _quota_usage_get_all(context, project_id, user_id=None):
nova.context.authorize_project_context(context, project_id)
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id)
result = {'project_id': project_id}
if user_id:
query = query.filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == None))
result['user_id'] = user_id
rows = query.all()
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved)
return result
@require_context
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
def quota_usage_get_all_by_project(context, project_id):
return _quota_usage_get_all(context, project_id)
def _quota_usage_create(context, project_id, user_id, resource, in_use,
reserved, until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
    # updated_at is needed to judge whether max_age has been exceeded
quota_usage_ref.updated_at = timeutils.utcnow()
quota_usage_ref.save(session=session)
return quota_usage_ref
@require_admin_context
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
updates = {}
for key in ['in_use', 'reserved', 'until_refresh']:
if key in kwargs:
updates[key] = kwargs[key]
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == None)).\
update(updates)
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
###################
def _reservation_create(context, uuid, usage, project_id, user_id, resource,
delta, expire, session=None):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_user_quota_usages(context, session, project_id, user_id):
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == None)).\
with_lockmode('update').\
all()
return dict((row.resource, row) for row in rows)
def _get_project_quota_usages(context, session, project_id):
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
with_lockmode('update').\
all()
result = dict()
    # Sum the in_use and reserved counts for each resource
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
result[row.resource]['total'] += (row.in_use + row.reserved)
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved,
total=row.in_use + row.reserved)
return result
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
if user_id is None:
user_id = context.user_id
# Get the current usages
user_usages = _get_user_quota_usages(context, session,
project_id, user_id)
project_usages = _get_project_quota_usages(context, session,
project_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
refresh = False
if ((resource not in PER_PROJECT_QUOTAS) and
(resource not in user_usages)):
user_usages[resource] = _quota_usage_create(elevated,
project_id,
user_id,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif ((resource in PER_PROJECT_QUOTAS) and
(resource not in user_usages)):
user_usages[resource] = _quota_usage_create(elevated,
project_id,
None,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif user_usages[resource].in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif user_usages[resource].until_refresh is not None:
user_usages[resource].until_refresh -= 1
if user_usages[resource].until_refresh <= 0:
refresh = True
elif max_age and (user_usages[resource].updated_at -
timeutils.utcnow()).seconds >= max_age:
refresh = True
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
updates = sync(elevated, project_id, user_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if ((res not in PER_PROJECT_QUOTAS) and
(res not in user_usages)):
user_usages[res] = _quota_usage_create(elevated,
project_id,
user_id,
res,
0, 0,
until_refresh or None,
session=session)
if ((res in PER_PROJECT_QUOTAS) and
(res not in user_usages)):
user_usages[res] = _quota_usage_create(elevated,
project_id,
None,
res,
0, 0,
until_refresh or None,
session=session)
if user_usages[res].in_use != in_use:
LOG.debug(_('quota_usages out of sync, updating. '
'project_id: %(project_id)s, '
'user_id: %(user_id)s, '
'resource: %(res)s, '
'tracked usage: %(tracked_use)s, '
'actual usage: %(in_use)s'),
{'project_id': project_id,
'user_id': user_id,
'res': res,
'tracked_use': user_usages[res].in_use,
'in_use': in_use})
# Update the usage
user_usages[res].in_use = in_use
user_usages[res].until_refresh = until_refresh or None
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [res for res, delta in deltas.items()
if delta < 0 and
delta + user_usages[res].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
for key, value in user_usages.items():
if key not in project_usages:
project_usages[key] = value
overs = [res for res, delta in deltas.items()
if user_quotas[res] >= 0 and delta >= 0 and
(project_quotas[res] < delta +
project_usages[res]['total'] or
user_quotas[res] < delta +
user_usages[res].total)]
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for res, delta in deltas.items():
reservation = _reservation_create(elevated,
str(uuid.uuid4()),
user_usages[res],
project_id,
user_id,
res, delta, expire,
session=session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
user_usages[res].reserved += delta
# Apply updates to the usages table
for usage_ref in user_usages.values():
session.add(usage_ref)
if unders:
LOG.warning(_("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
if project_quotas == user_quotas:
usages = project_usages
else:
usages = user_usages
usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
for k, v in usages.items())
headroom = dict((res, user_quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in user_quotas.keys())
# If quota_cores is unlimited [-1]:
# - set cores headroom based on instances headroom:
if user_quotas.get('cores') == -1:
if deltas['cores']:
hc = headroom['instances'] * deltas['cores']
headroom['cores'] = hc / deltas['instances']
else:
headroom['cores'] = headroom['instances']
# If quota_ram is unlimited [-1]:
# - set ram headroom based on instances headroom:
if user_quotas.get('ram') == -1:
if deltas['ram']:
hr = headroom['instances'] * deltas['ram']
headroom['ram'] = hr / deltas['instances']
else:
headroom['ram'] = headroom['instances']
raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas,
usages=usages, headroom=headroom)
return reservations
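# A minimal sketch of how the reservation helpers in this section are meant
# to be driven (illustrative only; `ctxt`, `resources` and the quota dicts
# are assumed to be supplied by the quota engine that calls into this layer):
#
#     rsvs = quota_reserve(ctxt, resources, project_quotas, user_quotas,
#                          deltas={'instances': 1, 'cores': 2, 'ram': 512},
#                          expire=expire, until_refresh=None, max_age=0)
#     try:
#         ...  # perform the work the reservation covers
#         reservation_commit(ctxt, rsvs)
#     except Exception:
#         reservation_rollback(ctxt, rsvs)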
def _quota_reservations_query(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update')
@require_context
@_retry_on_deadlock
def reservation_commit(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
usages = _get_user_quota_usages(context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_context
@_retry_on_deadlock
def reservation_rollback(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
usages = _get_user_quota_usages(context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
session = get_session()
with session.begin():
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
@require_admin_context
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
reservation_query = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter(models.Reservation.expire < current_time)
for reservation in reservation_query.join(models.QuotaUsage).all():
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
session.add(reservation.usage)
reservation_query.soft_delete(synchronize_session=False)
###################
def _ec2_volume_get_query(context, session=None):
return model_query(context, models.VolumeIdMapping,
session=session, read_deleted='yes')
def _ec2_snapshot_get_query(context, session=None):
return model_query(context, models.SnapshotIdMapping,
session=session, read_deleted='yes')
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
"""Create ec2 compatible volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
ec2_volume_ref.update({'id': id})
ec2_volume_ref.save()
return ec2_volume_ref
@require_context
def get_ec2_volume_id_by_uuid(context, volume_id):
result = _ec2_volume_get_query(context).\
filter_by(uuid=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result['id']
@require_context
def get_volume_uuid_by_ec2_id(context, ec2_id):
result = _ec2_volume_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=ec2_id)
return result['uuid']
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
"""Create ec2 compatible snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
ec2_snapshot_ref.update({'id': id})
ec2_snapshot_ref.save()
return ec2_snapshot_ref
@require_context
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
result = _ec2_snapshot_get_query(context).\
filter_by(uuid=snapshot_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result['id']
@require_context
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
result = _ec2_snapshot_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=ec2_id)
return result['uuid']
###################
def _block_device_mapping_get_query(context, session=None,
columns_to_join=None, use_slave=False):
if columns_to_join is None:
columns_to_join = []
query = model_query(context, models.BlockDeviceMapping,
session=session, use_slave=use_slave)
for column in columns_to_join:
query = query.options(joinedload(column))
return query
def _scrub_empty_str_values(dct, keys_to_scrub):
"""Remove any keys found in sequence keys_to_scrub from the dict
if they have the value ''.
"""
for key in keys_to_scrub:
if key in dct and dct[key] == '':
del dct[key]
def _from_legacy_values(values, legacy, allow_updates=False):
if legacy:
if allow_updates and block_device.is_safe_for_update(values):
return values
else:
return block_device.BlockDeviceDict.from_legacy(values)
else:
return values
@require_context
def block_device_mapping_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy)
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save()
return bdm_ref
@require_context
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
query.update(values)
return query.first()
def block_device_mapping_update_or_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
session = get_session()
with session.begin():
result = None
# NOTE(xqueralt): Only update a BDM when device_name was provided. We
# allow empty device names so they will be set later by the manager.
if values['device_name']:
query = _block_device_mapping_get_query(context, session=session)
result = query.filter_by(instance_uuid=values['instance_uuid'],
device_name=values['device_name']).first()
if result:
result.update(values)
else:
# Either the device_name doesn't exist in the database yet, or no
# device_name was provided. Both cases mean creating a new BDM.
result = models.BlockDeviceMapping(**values)
result.save(session=session)
# NOTE(xqueralt): Prevent from having multiple swap devices for the
# same instance. This will delete all the existing ones.
if block_device.new_format_is_swap(values):
query = _block_device_mapping_get_query(context, session=session)
query = query.filter_by(instance_uuid=values['instance_uuid'],
source_type='blank', guest_format='swap')
query = query.filter(models.BlockDeviceMapping.id != result.id)
query.soft_delete()
return result
@require_context
def block_device_mapping_get_all_by_instance(context, instance_uuid,
use_slave=False):
return _block_device_mapping_get_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
def block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
first()
@require_context
def block_device_mapping_destroy(context, bdm_id):
_block_device_mapping_get_query(context).\
filter_by(id=bdm_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
soft_delete()
###################
def _security_group_create(context, values, session=None):
security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, rules fail with a lazy load
    # exception once save() is called. This will get cleaned up in the
    # next orm pass.
security_group_ref.rules
security_group_ref.update(values)
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=values['project_id'],
security_group_name=values['name'])
return security_group_ref
def _security_group_get_query(context, session=None, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup, session=session,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
query = query.options(joinedload_all('rules.grantee_group'))
return query
def _security_group_get_by_names(context, session, project_id, group_names):
"""Get security group models for a project by a list of names.
    Raise SecurityGroupNotFoundForProject if any name is not found.
"""
query = _security_group_get_query(context, session=session,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
if len(sg_models) == len(group_names):
return sg_models
# Find the first one missing and raise
group_names_from_models = [x.name for x in sg_models]
for group_name in group_names:
if group_name not in group_names_from_models:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
# Not Reached
@require_context
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
def security_group_get(context, security_group_id, columns_to_join=None):
query = _security_group_get_query(context, project_only=True).\
filter_by(id=security_group_id)
if columns_to_join is None:
columns_to_join = []
for column in columns_to_join:
if column.startswith('instances'):
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
query = _security_group_get_query(context,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
if columns_to_join is None:
columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
return result
@require_context
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_context
def security_group_get_by_instance(context, instance_uuid):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
filter_by(uuid=instance_uuid).\
all()
@require_context
def security_group_in_use(context, group_id):
session = get_session()
with session.begin():
# Are there any instances that haven't been deleted
# that include this group?
inst_assoc = model_query(context,
models.SecurityGroupInstanceAssociation,
read_deleted="no", session=session).\
filter_by(security_group_id=group_id).\
all()
for ia in inst_assoc:
num_instances = model_query(context, models.Instance,
session=session, read_deleted="no").\
filter_by(uuid=ia.instance_uuid).\
count()
if num_instances:
return True
return False
@require_context
def security_group_create(context, values):
return _security_group_create(context, values)
@require_context
def security_group_update(context, security_group_id, values,
columns_to_join=None):
session = get_session()
with session.begin():
query = model_query(context, models.SecurityGroup,
session=session).filter_by(id=security_group_id)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload_all(column))
security_group_ref = query.first()
if not security_group_ref:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
security_group_ref.update(values)
name = security_group_ref['name']
project_id = security_group_ref['project_id']
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=project_id,
security_group_name=name)
return security_group_ref
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
session = get_session()
with session.begin():
try:
default_group = _security_group_get_by_names(context,
session,
context.project_id,
['default'])[0]
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
default_group = _security_group_create(context, values,
session=session)
usage = model_query(context, models.QuotaUsage,
read_deleted="no", session=session).\
filter_by(project_id=context.project_id).\
filter_by(user_id=context.user_id).\
filter_by(resource='security_groups')
# Create quota usage for auto created default security group
if not usage.first():
elevated = context.elevated()
_quota_usage_create(elevated,
context.project_id,
context.user_id,
'security_groups',
1, 0,
None,
session=session)
else:
usage.update({'in_use': int(usage.first().in_use) + 1})
default_rules = _security_group_rule_get_default_query(context,
session=session).all()
for default_rule in default_rules:
            # This is suboptimal; it should be possible to derive the
            # values of the default_rule programmatically.
rule_values = {'protocol': default_rule.protocol,
'from_port': default_rule.from_port,
'to_port': default_rule.to_port,
'cidr': default_rule.cidr,
'parent_group_id': default_group.id,
}
_security_group_rule_create(context,
rule_values,
session=session)
return default_group
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
model_query(context, models.SecurityGroup,
session=session).\
filter_by(id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(security_group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
def _security_group_count_by_project_and_user(context, project_id, user_id,
session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
###################
def _security_group_rule_create(context, values, session=None):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save(session=session)
return security_group_rule_ref
def _security_group_rule_get_query(context, session=None):
return model_query(context, models.SecurityGroupIngressRule,
session=session)
@require_context
def security_group_rule_get(context, security_group_rule_id):
result = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
first())
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['grantee_group.instances.system_metadata',
'grantee_group.instances.info_cache']
query = (_security_group_rule_get_query(context).
filter_by(parent_group_id=security_group_id))
for column in columns_to_join:
query = query.options(joinedload_all(column))
return query.all()
@require_context
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
return (_security_group_rule_get_query(context).
filter_by(group_id=security_group_id).
all())
@require_context
def security_group_rule_create(context, values):
return _security_group_rule_create(context, values)
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
count = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
soft_delete())
if count == 0:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
@require_context
def security_group_rule_count_by_group(context, security_group_id):
return (model_query(context, models.SecurityGroupIngressRule,
read_deleted="no").
filter_by(parent_group_id=security_group_id).
count())
###################
def _security_group_rule_get_default_query(context, session=None):
return model_query(context, models.SecurityGroupIngressDefaultRule,
session=session)
@require_context
def security_group_default_rule_get(context, security_group_rule_default_id):
result = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
first()
if not result:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
return result
@require_admin_context
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
session = get_session()
with session.begin():
count = _security_group_rule_get_default_query(context,
session=session).\
filter_by(id=security_group_rule_default_id).\
soft_delete()
if count == 0:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
@require_admin_context
def security_group_default_rule_create(context, values):
security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
security_group_default_rule_ref.update(values)
security_group_default_rule_ref.save()
return security_group_default_rule_ref
@require_context
def security_group_default_rule_list(context):
return _security_group_rule_get_default_query(context).\
all()
###################
@require_admin_context
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save()
return fw_rule_ref
@require_admin_context
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
session = get_session()
with session.begin():
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
soft_delete()
###################
@require_context
def project_get_networks(context, project_id, associate=True):
# NOTE(tr3buchet): as before this function will associate
# a project with a network if it doesn't have one and
# associate is true
result = model_query(context, models.Network, read_deleted="no").\
filter_by(project_id=project_id).\
all()
if not result:
if not associate:
return []
return [network_associate(context, project_id)]
return result
###################
@require_admin_context
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save()
return migration
@require_admin_context
def migration_update(context, id, values):
session = get_session()
with session.begin():
migration = _migration_get(context, id, session=session)
migration.update(values)
return migration
def _migration_get(context, id, session=None):
result = model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
@require_admin_context
def migration_get(context, id):
return _migration_get(context, id)
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
status=status)
return result
@require_admin_context
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute, use_slave=False):
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes",
use_slave=use_slave).\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
all()
@require_admin_context
def migration_get_in_progress_by_host_and_node(context, host, node):
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted',
'error'])).\
options(joinedload_all('instance.system_metadata')).\
all()
@require_admin_context
def migration_get_all_by_filters(context, filters):
query = model_query(context, models.Migration)
if "status" in filters:
query = query.filter(models.Migration.status == filters["status"])
if "host" in filters:
host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host,
models.Migration.dest_compute == host))
return query.all()
##################
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
try:
pool.save()
except db_exc.DBDuplicateEntry:
raise exception.ConsolePoolExists(
host=values["host"],
console_type=values["console_type"],
compute_host=values["compute_host"],
)
return pool
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(
host=host, console_type=console_type,
compute_host=compute_host)
return result
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
options(joinedload('consoles')).\
all()
def console_create(context, values):
console = models.Console()
console.update(values)
console.save()
return console
def console_delete(context, console_id):
session = get_session()
with session.begin():
# NOTE(mdragon): consoles are meant to be transient.
session.query(models.Console).\
filter_by(id=console_id).\
delete()
def console_get_by_pool_instance(context, pool_id, instance_uuid):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
filter_by(instance_uuid=instance_uuid).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(
pool_id=pool_id, instance_uuid=instance_uuid)
return result
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
return query.all()
def console_get(context, console_id, instance_uuid=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
options(joinedload('pool'))
if instance_uuid is not None:
query = query.filter_by(instance_uuid=instance_uuid)
result = query.first()
if not result:
if instance_uuid:
raise exception.ConsoleNotFoundForInstance(
console_id=console_id, instance_uuid=instance_uuid)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
@require_admin_context
def flavor_create(context, values, projects=None):
"""Create a new instance type. In order to pass in extra specs,
the values dict should contain a 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.iteritems():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
if projects is None:
projects = []
session = get_session()
with session.begin():
try:
instance_type_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'flavorid' in e.columns:
raise exception.FlavorIdExists(flavor_id=values['flavorid'])
raise exception.FlavorExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_ref.id,
"project_id": project})
access_ref.save()
return _dict_with_extra_specs(instance_type_ref)
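# Illustrative call (admin context and field values are placeholders; the
# column names follow the InstanceTypes model used above):
#
#     flavor_create(admin_ctxt,
#                   {'name': 'm1.example', 'flavorid': '100',
#                    'memory_mb': 512, 'vcpus': 1, 'root_gb': 1,
#                    'extra_specs': {'k1': 'v1'}},
#                   projects=['some-project-id'])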
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value'])
for x in inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
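# For example, a row whose extra_specs relationship loads as
#     [{'key': 'k1', 'value': 'v1'}, {'key': 'k2', 'value': 'v2'}]
# is returned with
#     inst_type_dict['extra_specs'] == {'k1': 'v1', 'k2': 'v2'}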
def _flavor_get_query(context, session=None, read_deleted=None):
query = model_query(context, models.InstanceTypes, session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
the_filter = [models.InstanceTypes.is_public == True]
the_filter.extend([
models.InstanceTypes.projects.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
@require_context
def flavor_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
"""Returns all flavors.
"""
filters = filters or {}
# FIXME(sirp): now that we have the `disabled` field for flavors, we
# should probably remove the use of `deleted` to mark inactive. `deleted`
    # should mean truly deleted, i.e. we can safely purge the record out of the
# database.
read_deleted = "yes" if inactive else "no"
sort_fn = {'desc': desc, 'asc': asc}
query = _flavor_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
if 'disabled' in filters:
query = query.filter(
models.InstanceTypes.disabled == filters['disabled'])
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.InstanceTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
the_filter.extend([
models.InstanceTypes.projects.any(
project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
marker_row = None
if marker is not None:
marker_row = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=marker).\
first()
if not marker_row:
raise exception.MarkerNotFound(marker)
query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
[sort_key, 'id'],
marker=marker_row,
sort_dir=sort_dir)
inst_types = query.all()
return [_dict_with_extra_specs(i) for i in inst_types]
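# Illustrative pagination call (filter values are placeholders):
#
#     flavor_get_all(ctxt, filters={'min_memory_mb': 1024, 'is_public': True},
#                    sort_key='flavorid', sort_dir='asc',
#                    limit=20, marker='100')
#
# where `marker` is the flavorid of the last flavor on the previous page;
# an unknown marker raises MarkerNotFound.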
def _flavor_get_id_from_flavor_query(context, flavor_id, session=None):
return model_query(context, models.InstanceTypes.id, read_deleted="no",
session=session, base_model=models.InstanceTypes).\
filter_by(flavorid=flavor_id)
def _flavor_get_id_from_flavor(context, flavor_id, session=None):
result = _flavor_get_id_from_flavor_query(context, flavor_id,
session=session).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return result[0]
@require_context
def flavor_get(context, id):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(id=id).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=id)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_name(context, name):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(name=name).\
first()
if not result:
raise exception.FlavorNotFoundByName(flavor_name=name)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=flavor_id).\
order_by(asc("deleted"), asc("id")).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return _dict_with_extra_specs(result)
@require_admin_context
def flavor_destroy(context, name):
"""Marks specific flavor as deleted."""
session = get_session()
with session.begin():
ref = model_query(context, models.InstanceTypes, session=session,
read_deleted="no").\
filter_by(name=name).\
first()
if not ref:
raise exception.FlavorNotFoundByName(flavor_name=name)
ref.soft_delete(session=session)
model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
model_query(context, models.InstanceTypeProjects,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
def _flavor_access_query(context, session=None):
return model_query(context, models.InstanceTypeProjects, session=session,
read_deleted="no")
@require_admin_context
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access list by flavor id."""
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
access_refs = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id_subq).\
all()
return access_refs
@require_admin_context
def flavor_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_id,
"project_id": project_id})
try:
access_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
return access_ref
@require_admin_context
def flavor_access_remove(context, flavor_id, project_id):
"""Remove given tenant from the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
count = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id).\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
def _flavor_extra_specs_get_query(context, flavor_id, session=None):
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
return model_query(context, models.InstanceTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id_subq)
@require_context
def flavor_extra_specs_get(context, flavor_id):
rows = _flavor_extra_specs_get_query(context, flavor_id).all()
return dict([(row['key'], row['value']) for row in rows])
@require_context
def flavor_extra_specs_get_item(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
first()
if not result:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
return {result["key"]: result["value"]}
@require_context
def flavor_extra_specs_delete(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
soft_delete(synchronize_session=False)
# did not find the extra spec
if result == 0:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
@require_context
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
instance_type_id = _flavor_get_id_from_flavor(context,
flavor_id, session)
spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=instance_type_id).\
filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
all()
existing_keys = set()
for spec_ref in spec_refs:
key = spec_ref["key"]
existing_keys.add(key)
spec_ref.update({"value": specs[key]})
for key, value in specs.iteritems():
if key in existing_keys:
continue
spec_ref = models.InstanceTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id})
session.add(spec_ref)
return specs
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
if attempt == max_retries - 1:
raise
####################
@require_admin_context
def cell_create(context, values):
cell = models.Cell()
cell.update(values)
try:
cell.save()
except db_exc.DBDuplicateEntry:
raise exception.CellExists(name=values['name'])
return cell
def _cell_get_by_name_query(context, cell_name, session=None):
return model_query(context, models.Cell,
session=session).filter_by(name=cell_name)
@require_admin_context
def cell_update(context, cell_name, values):
session = get_session()
with session.begin():
cell_query = _cell_get_by_name_query(context, cell_name,
session=session)
if not cell_query.update(values):
raise exception.CellNotFound(cell_name=cell_name)
cell = cell_query.first()
return cell
@require_admin_context
def cell_delete(context, cell_name):
return _cell_get_by_name_query(context, cell_name).soft_delete()
@require_admin_context
def cell_get(context, cell_name):
result = _cell_get_by_name_query(context, cell_name).first()
if not result:
raise exception.CellNotFound(cell_name=cell_name)
return result
@require_admin_context
def cell_get_all(context):
return model_query(context, models.Cell, read_deleted="no").all()
########################
# User-provided metadata
def _instance_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceMetadata,
session=session, use_slave=use_slave).\
filter(
models.InstanceMetadata.instance_uuid.in_(instance_uuids))
def _instance_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceMetadata, session=session,
read_deleted="no").\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
@require_context
@_retry_on_deadlock
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
soft_delete()
@require_context
@_retry_on_deadlock
def instance_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
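# Illustrative semantics of the `delete` flag (uuid and context assumed):
#
#     instance_metadata_update(ctxt, uuid, {'a': '1'}, delete=False)
#         -> existing keys other than 'a' are left untouched
#     instance_metadata_update(ctxt, uuid, {'a': '1'}, delete=True)
#         -> every existing key not present in the new mapping is
#            soft-deleted before 'a' is written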
#######################
# System-owned metadata
def _instance_system_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
session=session, use_slave=use_slave).\
filter(
models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceSystemMetadata,
session=session).\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
####################
@require_admin_context
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
try:
agent_build_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
os=values['os'], architecture=values['architecture'])
return agent_build_ref
@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture):
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
filter_by(os=os).\
filter_by(architecture=architecture).\
first()
@require_admin_context
def agent_build_get_all(context, hypervisor=None):
if hypervisor:
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
all()
else:
return model_query(context, models.AgentBuild, read_deleted="no").\
all()
@require_admin_context
def agent_build_destroy(context, agent_build_id):
rows_affected = model_query(context, models.AgentBuild).filter_by(
id=agent_build_id).soft_delete()
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
@require_admin_context
def agent_build_update(context, agent_build_id, values):
rows_affected = model_query(context, models.AgentBuild).\
filter_by(id=agent_build_id).\
update(values)
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
####################
@require_context
def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
return model_query(context, models.BandwidthUsage, read_deleted="yes",
use_slave=use_slave).\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
first()
@require_context
def bw_usage_get_by_uuids(context, uuids, start_period):
return model_query(context, models.BandwidthUsage, read_deleted="yes").\
filter(models.BandwidthUsage.uuid.in_(uuids)).\
filter_by(start_period=start_period).\
all()
@require_context
@_retry_on_deadlock
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None):
session = get_session()
if last_refreshed is None:
last_refreshed = timeutils.utcnow()
# NOTE(comstud): More often than not, we'll be updating records vs
# creating records. Optimize accordingly, trying to update existing
# records. Fall back to creation when no rows are updated.
with session.begin():
values = {'last_refreshed': last_refreshed,
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'bw_in': bw_in,
'bw_out': bw_out}
rows = model_query(context, models.BandwidthUsage,
session=session, read_deleted="yes").\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
update(values, synchronize_session=False)
if rows:
return
bwusage = models.BandwidthUsage()
bwusage.start_period = start_period
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = last_refreshed
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.last_ctr_in = last_ctr_in
bwusage.last_ctr_out = last_ctr_out
try:
bwusage.save(session=session)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to create
# the usage entry at the same time. First one wins.
pass
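# Illustrative call (uuid, MAC and counter values are placeholders):
#
#     bw_usage_update(ctxt, instance_uuid, 'fa:16:3e:00:00:01', start_period,
#                     bw_in=1024, bw_out=2048,
#                     last_ctr_in=10, last_ctr_out=20)
#
# which updates the matching (uuid, mac, start_period) row when one exists
# and otherwise inserts a new one, tolerating the duplicate-insert race
# noted above.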
####################
@require_context
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == None,
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == None,
models.VolumeUsage.curr_last_refreshed > begin,
)).\
all()
@require_context
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
session = get_session()
refreshed = timeutils.utcnow()
with session.begin():
values = {}
# NOTE(dricco): We will be mostly updating current usage records vs
# updating total or creating records. Optimize accordingly.
if not update_totals:
values = {'curr_last_refreshed': refreshed,
'curr_reads': rd_req,
'curr_read_bytes': rd_bytes,
'curr_writes': wr_req,
'curr_write_bytes': wr_bytes,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
else:
values = {'tot_last_refreshed': refreshed,
'tot_reads': models.VolumeUsage.tot_reads + rd_req,
'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
rd_bytes,
'tot_writes': models.VolumeUsage.tot_writes + wr_req,
'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
wr_bytes,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
current_usage = model_query(context, models.VolumeUsage,
session=session, read_deleted="yes").\
filter_by(volume_id=id).\
first()
if current_usage:
if (rd_req < current_usage['curr_reads'] or
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
LOG.info(_("Volume(%s) has lower stats then what is in "
"the database. Instance must have been rebooted "
"or crashed. Updating totals.") % id)
if not update_totals:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'])
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'])
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'])
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'])
else:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'] +
rd_req)
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'] + rd_bytes)
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'] +
wr_req)
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
current_usage.save(session=session)
session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
vol_usage.volume_id = id
vol_usage.instance_uuid = instance_id
vol_usage.project_id = project_id
vol_usage.user_id = user_id
vol_usage.availability_zone = availability_zone
if not update_totals:
vol_usage.curr_last_refreshed = refreshed
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
else:
vol_usage.tot_last_refreshed = refreshed
vol_usage.tot_reads = rd_req
vol_usage.tot_read_bytes = rd_bytes
vol_usage.tot_writes = wr_req
vol_usage.tot_write_bytes = wr_bytes
vol_usage.save(session=session)
return vol_usage
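# Illustrative use of `update_totals` (all identifiers are placeholders):
#
#     # refresh the curr_* counters for a volume
#     vol_usage_update(ctxt, vol_id, rd_req, rd_bytes, wr_req, wr_bytes,
#                      instance_uuid, project_id, user_id, az)
#     # fold curr_* into tot_* and reset the current counters
#     vol_usage_update(ctxt, vol_id, rd_req, rd_bytes, wr_req, wr_bytes,
#                      instance_uuid, project_id, user_id, az,
#                      update_totals=True)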
####################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_id)
return result
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_uuid)
return result
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save()
except Exception as e:
raise db_exc.DBError(e)
return s3_image_ref
####################
def _aggregate_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def aggregate_create(context, values, metadata=None):
session = get_session()
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no')
aggregate = query.first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.save(session=session)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this aggregate.
aggregate._hosts = []
aggregate._metadata = []
else:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
return aggregate_get(context, aggregate.id)
def aggregate_get(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id)
aggregate = query.first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
return aggregate
def aggregate_get_by_host(context, host, key=None):
"""Return rows that match host (mandatory) and metadata key (optional).
    :param host: Matches host, and is required.
    :param key: Matches metadata key, if not None.
"""
query = model_query(context, models.Aggregate)
query = query.options(joinedload('_hosts'))
query = query.options(joinedload('_metadata'))
query = query.join('_hosts')
query = query.filter(models.AggregateHost.host == host)
if key:
query = query.join("_metadata").filter(
models.AggregateMetadata.key == key)
return query.all()
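# Illustrative usage (host name is a placeholder):
#
#     aggregate_get_by_host(ctxt, 'compute-1')
#         -> all aggregates that contain host 'compute-1'
#     aggregate_get_by_host(ctxt, 'compute-1', key='availability_zone')
#         -> only those of its aggregates that also carry that metadata key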
def aggregate_metadata_get_by_host(context, host, key=None):
query = model_query(context, models.Aggregate)
query = query.join("_hosts")
query = query.join("_metadata")
query = query.filter(models.AggregateHost.host == host)
query = query.options(contains_eager("_metadata"))
if key:
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
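# The result maps each metadata key to the set of values seen across all of
# the host's aggregates, e.g. (illustrative):
#
#     {'availability_zone': set(['az1']), 'ssd': set(['true', 'false'])}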
def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key):
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.Aggregate.id == aggregate_id)
query = query.options(contains_eager("_metadata"))
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
def aggregate_host_get_by_metadata_key(context, key):
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.AggregateMetadata.key == key)
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for agghost in agg._hosts:
metadata[agghost.host].add(agg._metadata[0]['value'])
return dict(metadata)
def aggregate_update(context, aggregate_id, values):
session = get_session()
if "name" in values:
aggregate_by_name = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no').first())
if aggregate_by_name and aggregate_by_name.id != aggregate_id:
# there is another aggregate with the new name
raise exception.AggregateNameExists(aggregate_name=values['name'])
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).first())
set_delete = True
if aggregate:
if "availability_zone" in values:
az = values.pop('availability_zone')
if 'metadata' not in values:
values['metadata'] = {'availability_zone': az}
set_delete = False
else:
values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
aggregate_id,
values.pop('metadata'),
set_delete=set_delete)
aggregate.update(values)
aggregate.save(session=session)
values['metadata'] = metadata
return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
def aggregate_delete(context, aggregate_id):
session = get_session()
with session.begin():
count = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).\
soft_delete()
if count == 0:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
        # Delete Metadata
model_query(context,
models.AggregateMetadata, session=session).\
filter_by(aggregate_id=aggregate_id).\
soft_delete()
def aggregate_get_all(context):
return _aggregate_get_query(context, models.Aggregate).all()
def _aggregate_metadata_get_query(context, aggregate_id, session=None,
read_deleted="yes"):
return model_query(context,
models.AggregateMetadata,
read_deleted=read_deleted,
session=session).\
filter_by(aggregate_id=aggregate_id)
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
return dict([(r['key'], r['value']) for r in rows])
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
count = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
all_keys = metadata.keys()
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
query = _aggregate_metadata_get_query(context, aggregate_id,
read_deleted='no',
session=session)
if set_delete:
query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = \
query.filter(models.AggregateMetadata.key.in_(all_keys))
already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({"value": metadata[key]})
already_existing_keys.add(key)
for key, value in metadata.iteritems():
if key in already_existing_keys:
continue
meta_ref = models.AggregateMetadata()
meta_ref.update({"key": key,
"value": value,
"aggregate_id": aggregate_id})
session.add(meta_ref)
return metadata
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
with excutils.save_and_reraise_exception() as ctxt:
if attempt < max_retries - 1:
ctxt.reraise = False
else:
msg = _("Add metadata failed for aggregate %(id)s after "
"%(retries)s retries") % {"id": aggregate_id,
"retries": max_retries}
LOG.warn(msg)
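# Note on the retry loop above: a concurrent request can insert the same
# metadata key between the SELECT and the INSERT, raising DBDuplicateEntry.
# A sketch of a typical call (hypothetical values):
#   aggregate_metadata_add(ctxt, 1, {'availability_zone': 'nova'})
# returns the metadata dict once the rows are persisted.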
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id):
rows = model_query(context,
models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
return [r.host for r in rows]
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
count = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id).\
filter_by(host=host).\
soft_delete()
if count == 0:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
host_ref = models.AggregateHost()
host_ref.update({"host": host, "aggregate_id": aggregate_id})
try:
host_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
return host_ref
################
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
fault_ref.save()
return dict(fault_ref.iteritems())
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
if not instance_uuids:
return {}
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
order_by(desc("created_at"), desc("id")).\
all()
output = {}
for instance_uuid in instance_uuids:
output[instance_uuid] = []
for row in rows:
data = dict(row.iteritems())
output[row['instance_uuid']].append(data)
return output
##################
def action_start(context, values):
convert_objects_related_datetimes(values, 'start_time')
action_ref = models.InstanceAction()
action_ref.update(values)
action_ref.save()
return action_ref
def action_finish(context, values):
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action_ref = model_query(context, models.InstanceAction,
session=session).\
filter_by(instance_uuid=values['instance_uuid']).\
filter_by(request_id=values['request_id']).\
first()
if not action_ref:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
action_ref.update(values)
return action_ref
def actions_get(context, instance_uuid):
"""Get all instance actions for the provided uuid."""
actions = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
order_by(desc("created_at"), desc("id")).\
all()
return actions
def action_get_by_request_id(context, instance_uuid, request_id):
"""Get the action by request_id and given instance."""
action = _action_get_by_request_id(context, instance_uuid, request_id)
return action
def _action_get_by_request_id(context, instance_uuid, request_id,
session=None):
result = model_query(context, models.InstanceAction, session=session).\
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
first()
return result
def action_event_start(context, values):
"""Start an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
values['action_id'] = action['id']
event_ref = models.InstanceActionEvent()
event_ref.update(values)
session.add(event_ref)
return event_ref
def action_event_finish(context, values):
"""Finish an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
event_ref = model_query(context, models.InstanceActionEvent,
session=session).\
filter_by(action_id=action['id']).\
filter_by(event=values['event']).\
first()
if not event_ref:
raise exception.InstanceActionEventNotFound(action_id=action['id'],
event=values['event'])
event_ref.update(values)
if values['result'].lower() == 'error':
action.update({'message': 'Error'})
return event_ref
def action_events_get(context, action_id):
events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
order_by(desc("created_at"), desc("id")).\
all()
return events
def action_event_get_by_id(context, action_id, event_id):
event = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
filter_by(id=event_id).\
first()
return event
##################
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
ec2_instance_ref.update({'id': id})
ec2_instance_ref.save()
return ec2_instance_ref
@require_context
def get_ec2_instance_id_by_uuid(context, instance_id):
result = _ec2_instance_get_query(context).\
filter_by(uuid=instance_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result['id']
@require_context
def get_instance_uuid_by_ec2_id(context, ec2_id):
result = _ec2_instance_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=ec2_id)
return result['uuid']
def _ec2_instance_get_query(context, session=None):
return model_query(context,
models.InstanceIdMapping,
session=session,
read_deleted='yes')
def _task_log_get_query(context, task_name, period_beginning,
period_ending, host=None, state=None, session=None):
query = model_query(context, models.TaskLog, session=session).\
filter_by(task_name=task_name).\
filter_by(period_beginning=period_beginning).\
filter_by(period_ending=period_ending)
if host is not None:
query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
return query
@require_admin_context
def task_log_get(context, task_name, period_beginning, period_ending, host,
state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).first()
@require_admin_context
def task_log_get_all(context, task_name, period_beginning, period_ending,
host=None, state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).all()
@require_admin_context
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
task = models.TaskLog()
task.task_name = task_name
task.period_beginning = period_beginning
task.period_ending = period_ending
task.host = host
task.state = "RUNNING"
if message:
task.message = message
if task_items:
task.task_items = task_items
try:
task.save()
except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
@require_admin_context
def task_log_end_task(context, task_name, period_beginning, period_ending,
host, errors, message=None):
values = dict(state="DONE", errors=errors)
if message:
values["message"] = message
session = get_session()
with session.begin():
rows = _task_log_get_query(context, task_name, period_beginning,
period_ending, host, session=session).\
update(values)
if rows == 0:
            # It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
def _get_default_deleted_value(table):
# TODO(dripton): It would be better to introspect the actual default value
# from the column, but I don't see a way to do that in the low-level APIs
# of SQLAlchemy 0.7. 0.8 has better introspection APIs, which we should
# use when Nova is ready to require 0.8.
# NOTE(mikal): this is a little confusing. This method returns the value
# that a _not_deleted_ row would have.
deleted_column_type = table.c.deleted.type
if isinstance(deleted_column_type, Integer):
return 0
elif isinstance(deleted_column_type, Boolean):
return False
elif isinstance(deleted_column_type, String):
return ""
else:
return None
@require_admin_context
def archive_deleted_rows_for_table(context, tablename, max_rows):
"""Move up to max_rows rows from one tables to the corresponding
shadow table. The context argument is only used for the decorator.
:returns: number of rows archived
"""
# NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils
# imports nova.db.sqlalchemy.api.
from nova.db.sqlalchemy import utils as db_utils
engine = get_engine()
conn = engine.connect()
metadata = MetaData()
metadata.bind = engine
table = Table(tablename, metadata, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
rows_archived = 0
try:
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
# No corresponding shadow table; skip it.
return rows_archived
if tablename == "dns_domains":
# We have one table (dns_domains) where the key is called
# "domain" rather than "id"
column = table.c.domain
column_name = "domain"
else:
column = table.c.id
column_name = "id"
# NOTE(guochbo): Use InsertFromSelect and DeleteFromSelect to avoid
# database's limit of maximum parameter in one SQL statement.
query_insert = select([table],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
query_delete = select([column],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
insert_statement = db_utils.InsertFromSelect(shadow_table, query_insert)
delete_statement = db_utils.DeleteFromSelect(table, query_delete, column)
try:
# Group the insert and delete in a transaction.
with conn.begin():
result_insert = conn.execute(insert_statement)
result_delete = conn.execute(delete_statement)
except IntegrityError:
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
msg = _("IntegrityError detected when archiving table %s") % tablename
LOG.warn(msg)
return rows_archived
rows_archived = result_delete.rowcount
return rows_archived
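# Rough sketch of the archival above (table name is only an example): rows of
# 'instances' whose deleted column differs from its default value are copied
# into the matching shadow table (named with _SHADOW_TABLE_PREFIX) and then
# deleted from the production table, at most max_rows per call.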
@require_admin_context
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: Number of rows archived.
"""
# The context argument is only used for the decorator.
tablenames = []
for model_class in models.__dict__.itervalues():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
rows_archived = 0
for tablename in tablenames:
rows_archived += archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows - rows_archived)
if rows_archived >= max_rows:
break
return rows_archived
####################
def _instance_group_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.InstanceGroup: ['_policies', '_metadata',
'_members']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def instance_group_create(context, values, policies=None, metadata=None,
members=None):
"""Create a new group with metadata."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
values['uuid'] = uuid
session = get_session()
with session.begin():
try:
group = models.InstanceGroup()
group.update(values)
group.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=uuid)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this instance group.
group._policies = []
group._metadata = []
group._members = []
if policies:
_instance_group_policies_add(context, group.id, policies,
session=session)
if metadata:
_instance_group_metadata_add(context, group.id, metadata,
session=session)
if members:
_instance_group_members_add(context, group.id, members,
session=session)
return instance_group_get(context, uuid)
def instance_group_get(context, group_uuid):
"""Get a specific group by uuid."""
group = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return group
def instance_group_update(context, group_uuid, values):
"""Update the attributes of an group.
If values contains a metadata key, it updates the aggregate metadata
too. Similarly for the policies and members.
"""
session = get_session()
with session.begin():
group = model_query(context,
models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
policies = values.get('policies')
if policies is not None:
_instance_group_policies_add(context,
group.id,
values.pop('policies'),
set_delete=True,
session=session)
metadata = values.get('metadata')
if metadata is not None:
_instance_group_metadata_add(context,
group.id,
values.pop('metadata'),
set_delete=True,
session=session)
members = values.get('members')
if members is not None:
_instance_group_members_add(context,
group.id,
values.pop('members'),
set_delete=True,
session=session)
group.update(values)
if policies:
values['policies'] = policies
if metadata:
values['metadata'] = metadata
if members:
values['members'] = members
def instance_group_delete(context, group_uuid):
"""Delete an group."""
session = get_session()
with session.begin():
group_id = _instance_group_id(context, group_uuid, session=session)
count = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid,
session=session).soft_delete()
if count == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies, metadata and members
instance_models = [models.InstanceGroupPolicy,
models.InstanceGroupMetadata,
models.InstanceGroupMember]
for model in instance_models:
model_query(context, model, session=session).\
filter_by(group_id=group_id).\
soft_delete()
def instance_group_get_all(context):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).all()
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).\
filter_by(project_id=project_id).\
all()
def _instance_group_model_get_query(context, model_class, group_id,
session=None, read_deleted='no'):
return model_query(context,
model_class,
read_deleted=read_deleted,
session=session).\
filter_by(group_id=group_id)
def _instance_group_id(context, group_uuid, session=None):
"""Returns the group database ID for the group UUID."""
result = model_query(context,
models.InstanceGroup.id,
base_model=models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not result:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return result.id
def _instance_group_metadata_add(context, id, metadata, set_delete=False,
session=None):
if not session:
session = get_session()
with session.begin(subtransactions=True):
all_keys = metadata.keys()
query = _instance_group_model_get_query(context,
models.InstanceGroupMetadata,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupMetadata.key.in_(all_keys))
already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({'value': metadata[key]})
already_existing_keys.add(key)
for key, value in metadata.iteritems():
if key in already_existing_keys:
continue
meta_ref = models.InstanceGroupMetadata()
meta_ref.update({'key': key,
'value': value,
'group_id': id})
session.add(meta_ref)
return metadata
def instance_group_metadata_add(context, group_uuid, metadata,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_metadata_add(context, id, metadata,
set_delete=set_delete)
def instance_group_metadata_delete(context, group_uuid, key):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupMetadata,
models.InstanceGroupMetadata.group_id,
id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMetadataNotFound(group_uuid=group_uuid,
metadata_key=key)
def instance_group_metadata_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
rows = model_query(context,
models.InstanceGroupMetadata.key,
models.InstanceGroupMetadata.value,
base_model=models.InstanceGroupMetadata).\
filter_by(group_id=id).all()
return dict((r[0], r[1]) for r in rows)
def _instance_group_members_add(context, id, members, set_delete=False,
session=None):
if not session:
session = get_session()
all_members = set(members)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupMember,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupMember.instance_id.in_(
all_members)).\
soft_delete(synchronize_session=False)
query = query.filter(
models.InstanceGroupMember.instance_id.in_(all_members))
already_existing = set()
for member_ref in query.all():
already_existing.add(member_ref.instance_id)
for instance_id in members:
if instance_id in already_existing:
continue
member_ref = models.InstanceGroupMember()
member_ref.update({'instance_id': instance_id,
'group_id': id})
session.add(member_ref)
return members
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_members_add(context, id, members,
set_delete=set_delete)
def instance_group_member_delete(context, group_uuid, instance_id):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupMember,
models.InstanceGroupMember.group_id,
id).\
filter_by(instance_id=instance_id).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid,
instance_id=instance_id)
def instance_group_members_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
instances = model_query(context,
models.InstanceGroupMember.instance_id,
base_model=models.InstanceGroupMember).\
filter_by(group_id=id).all()
return [instance[0] for instance in instances]
def _instance_group_policies_add(context, id, policies, set_delete=False,
session=None):
if not session:
session = get_session()
allpols = set(policies)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupPolicy,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
already_existing = set()
for policy_ref in query.all():
already_existing.add(policy_ref.policy)
for policy in policies:
if policy in already_existing:
continue
policy_ref = models.InstanceGroupPolicy()
policy_ref.update({'policy': policy,
'group_id': id})
session.add(policy_ref)
return policies
def instance_group_policies_add(context, group_uuid, policies,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_policies_add(context, id, policies,
set_delete=set_delete)
def instance_group_policy_delete(context, group_uuid, policy):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupPolicy,
models.InstanceGroupPolicy.group_id,
id).\
filter_by(policy=policy).\
soft_delete()
if count == 0:
raise exception.InstanceGroupPolicyNotFound(group_uuid=group_uuid,
policy=policy)
def instance_group_policies_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
policies = model_query(context,
models.InstanceGroupPolicy.policy,
base_model=models.InstanceGroupPolicy).\
filter_by(group_id=id).all()
return [policy[0] for policy in policies]
####################
@require_admin_context
def pci_device_get_by_addr(context, node_id, dev_addr):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=dev_addr).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
return pci_dev_ref
@require_admin_context
def pci_device_get_by_id(context, id):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(id=id).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFoundById(id=id)
return pci_dev_ref
@require_admin_context
def pci_device_get_all_by_node(context, node_id):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
all()
@require_context
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter_by(instance_uuid=instance_uuid).\
all()
def _instance_pcidevs_get_multi(context, instance_uuids, session=None):
return model_query(context, models.PciDevice, session=session).\
filter_by(status='allocated').\
filter(models.PciDevice.instance_uuid.in_(instance_uuids))
@require_admin_context
def pci_device_destroy(context, node_id, address):
result = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
soft_delete()
if not result:
raise exception.PciDeviceNotFound(node_id=node_id, address=address)
@require_admin_context
def pci_device_update(context, node_id, address, values):
session = get_session()
with session.begin():
device = model_query(context, models.PciDevice, session=session,
read_deleted="no").\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
first()
if not device:
device = models.PciDevice()
device.update(values)
session.add(device)
return device
| apache-2.0 | -2,111,573,966,483,229,000 | 35.335143 | 79 | 0.574421 | false |
unioslo/cerebrum | Cerebrum/modules/no/uio/OrgLDIF.py | 1 | 15602 | # -*- coding: utf-8 -*-
# Copyright 2004-2014 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import unicode_literals
import re
import pickle
from os.path import join as join_paths
from collections import defaultdict
import cereconf
from Cerebrum.modules.no.OrgLDIF import norEduLDIFMixin
from Cerebrum.modules.OrgLDIF import postal_escape_re
from Cerebrum.modules.LDIFutils import (
ldapconf, normalize_string, hex_escape_match,
normalize_IA5String, verify_IA5String,
)
from Cerebrum.Utils import make_timer
# Replace these characters with spaces in OU RDNs.
ou_rdn2space_re = re.compile('[#\"+,;<>\\\\=\0\\s]+')
class OrgLDIFUiOMixin(norEduLDIFMixin):
"""Mixin class for norEduLDIFMixin(OrgLDIF) with UiO modifications."""
def __init__(self, db, logger):
self.__super.__init__(db, logger)
self.attr2syntax['mobile'] = self.attr2syntax['telephoneNumber']
self.attr2syntax['uioVisiblePrivateMobile'] = \
self.attr2syntax['mobile']
self.attr2syntax['uioPrimaryMail'] = (None, verify_IA5String,
                                              normalize_IA5String)
self.ou_quarantined = {}
def init_ou_dump(self):
self.__super.init_ou_dump()
self.get_ou_quarantines()
ou2parent = dict((c, p)
for p, ous in self.ou_tree.items()
for c in ous)
class Id2ou(dict):
# For missing id2ous, cache and return nearest parent or None
def __missing__(self, key):
val = self[key] = self[ou2parent.get(key)]
return val
self.ou_id2ou_uniq_id = Id2ou(self.ou_id2ou_uniq_id)
self.ou_id2ou_uniq_id.setdefault(None, None)
def test_omit_ou(self):
return (not self.ou.has_spread(self.const.spread_ou_publishable)) or \
self.ou_quarantined.get(self.ou.entity_id, False)
def get_ou_quarantines(self):
for row in self.ou.list_entity_quarantines(
entity_types=self.const.entity_ou,
quarantine_types=self.const.quarantine_ou_notvalid,
only_active=True):
self.ou_quarantined[int(row['entity_id'])] = True
def init_attr2id2contacts(self):
# Change from superclass: Include 'mobile' as well.
contact_source = getattr(self.const,
cereconf.LDAP['contact_source_system'])
contacts = [(attr, self.get_contacts(
contact_type=contact_type,
source_system=source_system,
convert=self.attr2syntax[attr][0],
verify=self.attr2syntax[attr][1],
normalize=self.attr2syntax[attr][2]))
for attr, source_system, contact_type in (
('telephoneNumber', contact_source, self.const.contact_phone),
('mobile', contact_source, self.const.contact_mobile_phone),
('uioVisiblePrivateMobile', contact_source,
self.const.contact_private_mobile_visible),
('facsimileTelephoneNumber', contact_source,
self.const.contact_fax),
('labeledURI', None, self.const.contact_url))]
self.id2labeledURI = contacts[-1][1]
self.attr2id2contacts = [v for v in contacts if v[1]]
def make_address(self, sep,
p_o_box, address_text, postal_number, city, country):
# Changes from superclass:
# Weird algorithm for when to use p_o_box.
# Append "Blindern" to postbox.
if country:
country = self.const.Country(country).country
if (p_o_box and int(postal_number or 0) / 100 == 3):
address_text = "Pb. %s - Blindern" % p_o_box
else:
address_text = (address_text or "").strip()
post_nr_city = None
if city or (postal_number and country):
post_nr_city = " ".join(filter(None, (postal_number,
(city or "").strip())))
val = "\n".join(filter(None, (address_text, post_nr_city, country)))
if sep == '$':
val = postal_escape_re.sub(hex_escape_match, val)
return val.replace("\n", sep)
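    # Illustrative result (hypothetical values): with p_o_box='1072',
    # postal_number='0316', city='OSLO' and sep='$', this method yields
    # "Pb. 1072 - Blindern$0316 OSLO".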
def init_person_course(self):
"""Populate dicts with a person's course information."""
timer = make_timer(self.logger, 'Processing person courses...')
self.ownerid2urnlist = pickle.load(file(
join_paths(ldapconf(None, 'dump_dir'), "ownerid2urnlist.pickle")))
timer("...person courses done.")
def init_person_groups(self):
"""Populate dicts with a person's group information."""
timer = make_timer(self.logger, 'Processing person groups...')
self.person2group = pickle.load(file(
join_paths(ldapconf(None, 'dump_dir'), "personid2group.pickle")))
timer("...person groups done.")
def init_person_dump(self, use_mail_module):
"""Supplement the list of things to run before printing the
list of people."""
self.__super.init_person_dump(use_mail_module)
self.init_person_course()
self.init_person_groups()
def init_person_titles(self):
# Change from original: Search titles first by system_lookup_order,
# then within each system let personal title override work title.
timer = make_timer(self.logger, 'Fetching personal titles...')
titles = defaultdict(dict)
for name_type in (self.const.personal_title, self.const.work_title):
for row in self.person.search_name_with_language(
entity_type=self.const.entity_person,
name_variant=name_type,
name_language=self.languages):
titles[int(row['entity_id'])].setdefault(
int(row['name_language']), row['name'])
self.person_titles = dict([(p_id, t.items())
for p_id, t in titles.items()])
timer("...personal titles done.")
def init_account_mail(self, use_mail_module):
u""" Cache account mail addresses.
        This method extends the general cache to also fill the primary email
        attribute. This is done to prepare for changing the normal email
        attribute.
:param bool use_mail_module:
If True, Cerebrum.modules.Email will be used to populate this
cache; otherwise the `self.account_mail` dict will be None.
"""
super(OrgLDIFUiOMixin, self).init_account_mail(use_mail_module)
if use_mail_module:
timer = make_timer(
self.logger,
"Doing UiO specific changes to account e-mail addresses...")
self.account_primary_mail = self.account_mail.copy()
# We don't want to import this if mod_email isn't present.
from Cerebrum.modules.Email import EmailTarget
targets = EmailTarget(self.db).list_email_target_addresses
mail = {}
for row in targets(target_type=self.const.email_target_account,
domain='uio.no', uname_local=True):
                # Can only return uname@uio.no addresses, so no need for any checks
mail[int(row['target_entity_id'])] = "@".join(
(row['local_part'], row['domain']))
self.account_mail.update(mail)
timer("...UiO specfic account e-mail addresses done.")
def make_uioPersonScopedAffiliation(self, p_id, pri_aff, pri_ou):
# [primary|secondary]:<affiliation>@<status>/<stedkode>
ret = []
pri_aff_str, pri_status_str = pri_aff
for aff, status, ou in self.affiliations[p_id]:
# populate the caches
if aff in self.aff_cache:
aff_str = self.aff_cache[aff]
else:
aff_str = str(self.const.PersonAffiliation(aff))
self.aff_cache[aff] = aff_str
if status in self.status_cache:
status_str = self.status_cache[status]
else:
status_str = str(self.const.PersonAffStatus(status).str)
self.status_cache[status] = status_str
p = 'secondary'
if (aff_str == pri_aff_str and
status_str == pri_status_str and ou == pri_ou):
p = 'primary'
ou = self.ou_id2ou_uniq_id[ou]
if ou:
ret.append(''.join((p, ':', aff_str, '/', status_str, '@',
ou)))
return ret
def make_person_entry(self, row, person_id):
""" Extend with UiO functionality. """
dn, entry, alias_info = self.__super.make_person_entry(row, person_id)
account_id = int(row['account_id'])
if not dn:
return dn, entry, alias_info
# Add or extend entitlements
if person_id in self.ownerid2urnlist:
urnlist = self.ownerid2urnlist[person_id]
if 'eduPersonEntitlement' in entry:
entry['eduPersonEntitlement'].update(urnlist)
else:
entry['eduPersonEntitlement'] = set(urnlist)
# Add person ID
entry['uioPersonId'] = str(person_id)
# Add group memberships
if person_id in self.person2group:
entry['uioMemberOf'] = self.person2group[person_id]
entry['objectClass'].append('uioMembership')
# Add scoped affiliations
pri_edu_aff, pri_ou, pri_aff = self.make_eduPersonPrimaryAffiliation(
person_id)
entry['uioPersonScopedAffiliation'] = \
self.make_uioPersonScopedAffiliation(person_id, pri_aff, pri_ou)
# uio attributes require uioPersonObject
entry['objectClass'].append('uioPersonObject')
        # Check if there exist any «avvikende» (deviant) addresses.
# If so, export them instead.
addrs = self.addr_info.get(person_id)
post = addrs and addrs.get(int(self.const.address_other_post))
if post:
a_txt, p_o_box, p_num, city, country = post
post = self.make_address("$", p_o_box, a_txt, p_num, city, country)
if post:
entry['postalAddress'] = (post,)
street = addrs and addrs.get(int(self.const.address_other_street))
if street:
a_txt, p_o_box, p_num, city, country = street
street = self.make_address(", ", None, a_txt, p_num, city, country)
if street:
entry['street'] = (street,)
if self.account_primary_mail:
mail = self.account_primary_mail.get(account_id)
if mail:
entry['uioPrimaryMail'] = mail
return dn, entry, alias_info
def _calculate_edu_OUs(self, p_ou, s_ous):
return s_ous
def init_person_selections(self, *args, **kwargs):
""" Extend with UiO settings for person selections.
This is especially for `no.uio.OrgLDIF.is_person_visible()`, as UiO has
some special needs in how to interpret visibility of persons due to
        affiliations for reservation and consent, which behave differently in
SAPUiO and FS.
"""
self.__super.init_person_selections(*args, **kwargs)
        # Set which affiliations should be checked for visibility from SAP
        # and FS. The default is to set the person to NOT visible, which
        # happens for all persons that don't have _any_ of the affiliations
# defined here.
self.visible_sap_affs = (int(self.const.affiliation_ansatt),)
tilkn_aff = int(self.const.affiliation_tilknyttet)
self.visible_sap_statuses = (
(tilkn_aff, int(self.const.affiliation_tilknyttet_ekst_stip)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_frida_reg)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_innkjoper)),
(tilkn_aff, int(self.const.
affiliation_tilknyttet_assosiert_person)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_ekst_forsker)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_emeritus)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_gjesteforsker)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_bilag)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_ekst_partner)),
)
student = int(self.const.affiliation_student)
self.fs_aff_statuses = (
(student, int(self.const.affiliation_status_student_aktiv)),
(student, int(self.const.affiliation_status_student_drgrad)),
(student, int(self.const.affiliation_status_student_emnestud)))
self.sap_res = self.init_person_group("SAP-elektroniske-reservasjoner")
self.fs_samtykke = self.init_person_group("FS-aktivt-samtykke")
def is_person_visible(self, person_id):
""" Override with UiO specific visibility.
At UiO, visibility is controlled differently depending on what source
system the person is from. SAPUiO has reservations, while FS has active
consents. Since we don't fetch source systems per affiliation from
Cerebrum in `OrgLDIF`, we only guess.
The reason for this override, is to support priority. SAP has priority
over FS, which can't be implemented through the configuration as of
today.
Note that the settings in `cereconf.LDAP_PERSON['visible_selector']` is
ignored by this override. The list of affiliations are hardcoded in the
method `init_person_selections`.
"""
# TODO: this could be changed to check the trait 'reserve_public'
# later, so we don't have to check group memberships.
#
# The trait behaves in the following manner:
# Every person should be 'invisible', except if:
# * The person has a trait of the type 'reserve_public', and
# * The trait's numval is set to 0
# This means that a missing trait should be considered as a
# reservation.
p_affs = self.affiliations[person_id]
# If there is an affiliation from SAP then consider
# reservations/permissions from SAP only.
for (aff, status, ou) in p_affs:
if aff in self.visible_sap_affs:
return person_id not in self.sap_res
if (aff, status) in self.visible_sap_statuses:
return person_id not in self.sap_res
        # Otherwise, if there is an affiliation STUDENT/<aktiv, emnestud or drgrad>,
# check for permission from FS to make the person visible.
for (aff, status, ou) in p_affs:
if (aff, status) in self.fs_aff_statuses:
return person_id in self.fs_samtykke
# Otherwise hide the person.
return False
| gpl-2.0 | -9,183,215,164,680,836,000 | 43.69914 | 83 | 0.604744 | false |
Microsoft/ApplicationInsights-Python | tests/applicationinsights_tests/channel_tests/TestSynchronousQueue.py | 1 | 1060 | import unittest
import sys, os, os.path
rootDirectory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
if rootDirectory not in sys.path:
sys.path.append(rootDirectory)
from applicationinsights import channel
class TestSynchronousQueue(unittest.TestCase):
def test_construct(self):
actual = channel.SynchronousQueue(MockSynchronousSender())
self.assertIsNotNone(actual)
def test_flush_works_as_expected(self):
sender = MockSynchronousSender()
queue = channel.SynchronousQueue(sender)
queue.max_queue_length = 3
for i in range(1, 8):
queue.put(i)
self.assertEqual([[1, 2], [3], [4, 5], [6]], sender.data)
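        # The expected batching follows from the parameters of this test:
        # each time the queue length reaches max_queue_length (3) it flushes,
        # and the sender drains it in chunks of send_buffer_size (2), which
        # leaves item 7 sitting in the queue below.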
temp = []
while queue._queue.qsize() > 0:
temp.append(queue._queue.get())
self.assertEqual([7], temp)
class MockSynchronousSender:
def __init__(self):
self.send_buffer_size = 2
self.data = []
self.queue = None
def send(self, data_to_send):
self.data.append(data_to_send)
| mit | 1,191,793,918,607,608,000 | 29.285714 | 85 | 0.627358 | false |
cbertinato/pandas | pandas/core/missing.py | 1 | 23588 | """
Routines for filling missing data.
"""
import operator
import numpy as np
from pandas._libs import algos, lib
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.common import (
ensure_float64, is_datetime64_dtype, is_datetime64tz_dtype, is_float_dtype,
is_integer, is_integer_dtype, is_numeric_v_string_like, is_scalar,
is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import isna
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask = False
else:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if is_scalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask |= False
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isna(arr)
else:
mask |= isna(arr)
# GH 21977
if mask is None:
mask = np.zeros(arr.shape, dtype=bool)
return mask
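# Illustrative behaviour (assuming a float ndarray):
#   mask_missing(np.array([1., np.nan, 3.]), [3., np.nan])
#   -> array([False, True, True])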
def clean_fill_method(method, allow_nearest=False):
# asfreq is compat for resampling
if method in [None, 'asfreq']:
return None
if isinstance(method, str):
method = method.lower()
if method == 'ffill':
method = 'pad'
elif method == 'bfill':
method = 'backfill'
valid_methods = ['pad', 'backfill']
expecting = 'pad (ffill) or backfill (bfill)'
if allow_nearest:
valid_methods.append('nearest')
expecting = 'pad (ffill), backfill (bfill) or nearest'
if method not in valid_methods:
msg = ('Invalid fill method. Expecting {expecting}. Got {method}'
.format(expecting=expecting, method=method))
raise ValueError(msg)
return method
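# For example, clean_fill_method('ffill') returns 'pad',
# clean_fill_method('bfill') returns 'backfill', and both None and 'asfreq'
# map to None.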
def clean_interp_method(method, **kwargs):
order = kwargs.get('order')
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial', 'krogh',
'piecewise_polynomial', 'pchip', 'akima', 'spline',
'from_derivatives']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {valid}. Got '{method}' "
"instead.".format(valid=valid, method=method))
return method
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
limit_direction='forward', limit_area=None, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
    Bounds_error is currently hardcoded to False since the non-scipy
    interpolation methods don't take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
# have to call np.asarray(xvalues) since xvalues could be an Index
# which can't be mutated
result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError('time-weighted interpolation only works '
'on Series or DataFrames with a '
'DatetimeIndex')
method = 'values'
valid_limit_directions = ['forward', 'backward', 'both']
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
msg = ('Invalid limit_direction: expecting one of {valid!r}, '
'got {invalid!r}.')
raise ValueError(msg.format(valid=valid_limit_directions,
invalid=limit_direction))
if limit_area is not None:
valid_limit_areas = ['inside', 'outside']
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError('Invalid limit_area: expecting one of {}, got '
'{}.'.format(valid_limit_areas, limit_area))
# default limit is unlimited GH #16282
if limit is None:
# limit = len(xvalues)
pass
elif not is_integer(limit):
raise ValueError('Limit must be an integer')
elif limit < 1:
raise ValueError('Limit must be greater than 0')
from pandas import Series
ys = Series(yvalues)
# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
start_nans = set(range(ys.first_valid_index()))
end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
mid_nans = all_nans - start_nans - end_nans
# Like the sets above, preserve_nans contains indices of invalid values,
# but in this case, it is the final set of indices that need to be
# preserved as NaN after the interpolation.
# For example if limit_direction='forward' then preserve_nans will
# contain indices of NaNs at the beginning of the series, and NaNs that
    # are more than 'limit' away from the prior non-NaN.
# set preserve_nans based on direction using _interp_limit
if limit_direction == 'forward':
preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
elif limit_direction == 'backward':
preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
else:
# both directions... just use _interp_limit
preserve_nans = set(_interp_limit(invalid, limit, limit))
# if limit_area is set, add either mid or outside indices
# to preserve_nans GH #16284
if limit_area == 'inside':
# preserve NaNs on the outside
preserve_nans |= start_nans | end_nans
elif limit_area == 'outside':
# preserve NaNs on the inside
preserve_nans |= mid_nans
    # sort preserve_nans and convert to list
preserve_nans = sorted(preserve_nans)
xvalues = getattr(xvalues, 'values', xvalues)
yvalues = getattr(yvalues, 'values', yvalues)
result = yvalues.copy()
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if needs_i8_conversion(inds.dtype.type):
inds = inds.view(np.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
result[preserve_nans] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima']
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
result[invalid] = _interpolate_scipy_wrapper(inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order, **kwargs)
result[preserve_nans] = np.nan
return result
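# Minimal sketch of the default 'linear' path above (hypothetical data):
#   interpolate_1d(np.arange(5), np.array([0., np.nan, 2., np.nan, 4.]))
#   -> array([0., 1., 2., 3., 4.])
# i.e. interior NaNs are filled by np.interp and no limit is applied.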
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in _clean_interp_method.
"""
extra = '{method} interpolation requires SciPy.'.format(method=method)
import_optional_dependency('scipy', extra=extra)
from scipy import interpolate
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
'barycentric': interpolate.barycentric_interpolate,
'krogh': interpolate.krogh_interpolate,
'from_derivatives': _from_derivatives,
'piecewise_polynomial': _from_derivatives,
}
if getattr(x, 'is_all_dates', False):
        # GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype('i8'), new_x.astype('i8')
if method == 'pchip':
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError("Your version of Scipy does not support "
"PCHIP interpolation.")
elif method == 'akima':
alt_methods['akima'] = _akima_interpolate
interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial']
if method in interp1d_methods:
if method == 'polynomial':
method = order
terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
# GH #10633, #24014
if isna(order) or (order <= 0):
raise ValueError("order needs to be specified and greater than 0; "
"got order: {}".format(order))
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
        list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
        intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1),
orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
from scipy import interpolate
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif interpolate._isscalar(der):
return P(x, der=der)
else:
return [P(x, nu) for nu in der]
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
dtype=None):
"""
    Perform an actual interpolation of values. The values will be made 2-d if
    needed and filled inplace; returns the result.
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(backfill_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0]
return values
def _cast_values_for_fillna(values, dtype):
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
# TODO: for int-dtypes we make a copy, but for everything else this
# alters the values in-place. Is this intentional?
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype)):
values = values.view(np.int64)
elif is_integer_dtype(values):
# NB: this check needs to come after the datetime64 check above
values = ensure_float64(values)
return values
def _fillna_prep(values, mask=None, dtype=None):
# boilerplate for pad_1d, backfill_1d, pad_2d, backfill_2d
if dtype is None:
dtype = values.dtype
if mask is None:
# This needs to occur before datetime/timedeltas are cast to int64
mask = isna(values)
values = _cast_values_for_fillna(values, dtype)
mask = mask.view(np.uint8)
return values, mask
def pad_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.pad_inplace(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.backfill_inplace(values, mask, limit=limit)
return values
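# Illustrative behaviour of the 1-d fillers (float input assumed):
#   pad_1d(np.array([1., np.nan, np.nan, 3.]))      -> [1., 1., 1., 3.]
#   backfill_1d(np.array([1., np.nan, np.nan, 3.])) -> [1., 3., 3., 3.]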
def pad_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.pad_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.backfill_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def fill_zeros(result, x, y, name, fill):
"""
If this is a reversed op, then flip x,y
If we have an integer value (or array in y)
and we have 0's, fill them with the fill,
return the result.
Mask the nan's from x.
"""
if fill is None or is_float_dtype(result):
return result
if name.startswith(('r', '__r')):
x, y = y, x
is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type'))
is_scalar_type = is_scalar(y)
if not is_variable_type and not is_scalar_type:
return result
if is_scalar_type:
y = np.array(y)
if is_integer_dtype(y):
if (y == 0).any():
# GH 7325, mask and nans must be broadcastable (also: PR 9308)
# Raveling and then reshaping makes np.putmask faster
mask = ((y == 0) & ~np.isnan(result)).ravel()
shape = result.shape
result = result.astype('float64', copy=False).ravel()
np.putmask(result, mask, fill)
# if we have a fill of inf, then sign it correctly
# (GH 6178 and PR 9308)
if np.isinf(fill):
signs = y if name.startswith(('r', '__r')) else x
signs = np.sign(signs.astype('float', copy=False))
negative_inf_mask = (signs.ravel() < 0) & mask
np.putmask(result, negative_inf_mask, -fill)
if "floordiv" in name: # (PR 9308)
nan_mask = ((y == 0) & (x == 0)).ravel()
np.putmask(result, nan_mask, np.nan)
result = result.reshape(shape)
return result
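# Hedged example (hypothetical integer inputs): for
#   x = np.array([1, 2, 3]); y = np.array([0, 2, 0])
# and a raw integer modulo result of [0, 0, 0], calling
#   fill_zeros(result, x, y, '__mod__', np.nan)
# upcasts to float and yields [nan, 0., nan] at the zero-divisor positions.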
def mask_zero_div_zero(x, y, result, copy=False):
"""
Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes
of the numerator or the denominator.
Parameters
----------
x : ndarray
y : ndarray
result : ndarray
copy : bool (default False)
Whether to always create a new array or try to fill in the existing
array if possible.
Returns
-------
filled_result : ndarray
Examples
--------
>>> x = np.array([1, 0, -1], dtype=np.int64)
>>> y = 0 # int 0; numpy behavior is different with float
>>> result = x / y
>>> result # raw numpy result does not fill division by zero
array([0, 0, 0])
>>> mask_zero_div_zero(x, y, result)
array([ inf, nan, -inf])
"""
if is_scalar(y):
y = np.array(y)
zmask = y == 0
if zmask.any():
shape = result.shape
nan_mask = (zmask & (x == 0)).ravel()
neginf_mask = (zmask & (x < 0)).ravel()
posinf_mask = (zmask & (x > 0)).ravel()
if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
# Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN
result = result.astype('float64', copy=copy).ravel()
np.putmask(result, nan_mask, np.nan)
np.putmask(result, posinf_mask, np.inf)
np.putmask(result, neginf_mask, -np.inf)
result = result.reshape(shape)
return result
def dispatch_missing(op, left, right, result):
"""
Fill nulls caused by division by zero, casting to a different dtype
if necessary.
Parameters
----------
op : function (operator.add, operator.div, ...)
left : object (Index for non-reversed ops)
right : object (Index fof reversed ops)
result : ndarray
Returns
-------
result : ndarray
"""
opstr = '__{opname}__'.format(opname=op.__name__).replace('____', '__')
if op in [operator.truediv, operator.floordiv,
getattr(operator, 'div', None)]:
result = mask_zero_div_zero(left, right, result)
elif op is operator.mod:
result = fill_zeros(result, left, right, opstr, np.nan)
elif op is divmod:
res0 = mask_zero_div_zero(left, right, result[0])
res1 = fill_zeros(result[1], left, right, opstr, np.nan)
result = (res0, res1)
return result
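# Illustrative sketch (added for clarity; not part of the original pandas
# module and never called by library code). It shows the intended behaviour of
# ``dispatch_missing`` for integer floor-division by zero, relying on the
# module's existing ``numpy as np`` and ``operator`` imports and on the usual
# numpy semantics where the raw int result of division by zero is 0.
def _example_dispatch_missing():
    x = np.array([1, 0, -1], dtype=np.int64)
    y = np.array([0, 0, 0], dtype=np.int64)
    with np.errstate(divide='ignore', invalid='ignore'):
        raw = x // y  # numpy yields [0, 0, 0] here
    # positive/0 -> +inf, 0/0 -> NaN, negative/0 -> -inf
    return dispatch_missing(operator.floordiv, x, y, raw)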
def _interp_limit(invalid, fw_limit, bw_limit):
"""
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : boolean ndarray
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
f_idx = set()
b_idx = set()
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = (set(np.where(windowed)[0] + limit) |
set(np.where((~invalid[:limit + 1]).cumsum() == 0)[0]))
return idx
if fw_limit is not None:
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit is not None:
if bw_limit == 0:
# then we don't even need to care about backwards
# just use forwards
return f_idx
else:
b_idx = list(inner(invalid[::-1], bw_limit))
b_idx = set(N - 1 - np.asarray(b_idx))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
def _rolling_window(a, window):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
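# Illustrative sketch (added for clarity; not part of the original module and
# never called by library code). It demonstrates how _interp_limit uses
# _rolling_window: a window of length limit + 1 that is all-invalid marks a
# position that exceeds the fill limit.
def _example_rolling_window():
    invalid = np.array([True, True, False, True, False])
    windows = _rolling_window(invalid, 2)  # the 2-wide windows shown in the docstring
    return windows.all(1)                  # -> array([ True, False, False, False])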
| bsd-3-clause | -1,535,343,801,592,295,700 | 31.445667 | 79 | 0.588265 | false |
GMadorell/djagolb | src/blog/views.py | 1 | 4684 | from collections import OrderedDict, Iterable
from django.contrib.sites.models import Site
from django.http import Http404
from django.views import generic
from django.views.generic.base import ContextMixin
from .models import BlogPostModel, Author, Tag
class AuthorContextMixin(ContextMixin):
author_model = Author
def get_context_data(self, **kwargs):
context = super(AuthorContextMixin, self).get_context_data(**kwargs)
context["author"] = self.author_model.objects.all()[0]
return context
class SiteContextMixin(ContextMixin):
def get_context_data(self, **kwargs):
context = super(SiteContextMixin, self).get_context_data(**kwargs)
context["site"] = Site.objects.all()[0]
return context
class BlogIndexView(
AuthorContextMixin,
SiteContextMixin,
generic.ListView
):
template_name = "blog/blog_index.html"
model = BlogPostModel
POSTS_PER_PAGE = 3
def get_queryset(self):
self.validate_correct_page()
return self.model.objects.order_by("-posted_at")[
self.get_starting_index():self.get_ending_index()]
def get_context_data(self, **kwargs):
self.validate_correct_page()
context = super(BlogIndexView, self).get_context_data(**kwargs)
context["has_older_posts"] = \
self.get_ending_index() < self.get_amount_posts()
context["has_newer_posts"] = self.get_starting_index() > 0
context["page"] = self.kwargs.get("page")
return context
def validate_correct_page(self):
if self.get_page() < 1:
raise Http404
if self.get_starting_index() > self.get_amount_posts():
raise Http404
def get_page(self):
return int(self.kwargs.get("page"))
def get_amount_posts(self):
return self.model.objects.count()
def get_starting_index(self):
return (self.get_page() - 1) * self.POSTS_PER_PAGE
def get_ending_index(self):
return self.get_starting_index() + self.POSTS_PER_PAGE
class BlogPostDetail(
AuthorContextMixin,
SiteContextMixin,
generic.DetailView,
):
template_name = "blog/blogpost.html"
context_object_name = "blogpost"
model = BlogPostModel
class ArchiveView(
AuthorContextMixin,
generic.TemplateView,
):
template_name = "blog/archive.html"
def get_context_data(self, **kwargs):
context = super(ArchiveView, self).get_context_data(**kwargs)
archive = OrderedDict()
posted_at_values = \
BlogPostModel.objects.order_by("-posted_at") \
.values_list("posted_at", flat=True)
# Make sure values are unique and ordered from high value to lower.
years = sorted(
list(set(map(lambda posted_at: posted_at.year, posted_at_values))),
reverse=True)
for year in years:
year_dic = OrderedDict()
posted_at_year = \
BlogPostModel.objects.filter(posted_at__year=year) \
.order_by("-posted_at") \
.values_list("posted_at", flat=True)
months = sorted(list(
set(map(lambda posted_at: posted_at.month, posted_at_year))),
reverse=True)
for month in months:
month_dic = OrderedDict()
posted_at_year_month = \
BlogPostModel.objects.filter(posted_at__year=year) \
.filter(posted_at__month=month) \
.order_by("-posted_at") \
.values_list("posted_at", flat=True)
days = sorted(list(set(map(lambda posted_at: posted_at.day,
posted_at_year_month))),
reverse=True)
for day in days:
blogposts_at_day = \
BlogPostModel.objects.filter(posted_at__year=year) \
.filter(posted_at__month=month) \
.filter(posted_at__day=day) \
.order_by("-posted_at")
month_dic[day] = list(blogposts_at_day)
year_dic[month] = month_dic
archive[year] = year_dic
context["archive"] = archive
context["test"] = BlogPostModel.objects.all()
return context
class AboutView(
generic.TemplateView,
AuthorContextMixin,
):
template_name = "blog/about.html"
class TagView(
generic.ListView,
AuthorContextMixin,
):
model = Tag
template_name = "blog/tags.html"
context_object_name = "tags"
| mit | 9,116,562,323,284,935,000 | 29.415584 | 79 | 0.583262 | false |
WoLpH/EventGhost | eg/WinApi/Dynamic/Mmsystem.py | 1 | 11993 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
#pylint: disable-msg=C0103,C0301,C0302
# This file gets automatically extended by ctypeslib.dynamic_module, so don't
# edit it yourself.
import sys
# Local imports
from eg.WinApi.Dynamic import *
_Winmm = WinDLL("Winmm")
if __name__ == "__main__":
try:
ctypeslib = __import__("ctypeslib.dynamic_module")
except ImportError:
print "ctypeslib is not installed!"
else:
try:
ctypeslib.dynamic_module.include(
"#define UNICODE\n"
"#define _WIN32_WINNT 0x500\n"
"#define WIN32_LEAN_AND_MEAN\n"
"#define NO_STRICT\n"
"#include <windows.h>\n"
"#include <Mmsystem.h>\n"
)
except WindowsError:
print "GCC_XML most likely not installed"
#-----------------------------------------------------------------------------#
# everything after the following line is automatically created
#-----------------------------------------------------------------------------#
MIXERCONTROL_CT_CLASS_MASK = 4026531840L # Variable c_ulong '-268435456ul'
MIXERCONTROL_CT_CLASS_FADER = 1342177280 # Variable c_long '1342177280l'
MIXERCONTROL_CONTROLTYPE_VOLUME = 1342373889 # Variable c_long '1342373889l'
MIXERCONTROL_CONTROLTYPE_BASS = 1342373890 # Variable c_long '1342373890l'
MIXERCONTROL_CONTROLTYPE_TREBLE = 1342373891 # Variable c_long '1342373891l'
MIXERCONTROL_CONTROLTYPE_EQUALIZER = 1342373892 # Variable c_long '1342373892l'
MIXERCONTROL_CONTROLTYPE_FADER = 1342373888 # Variable c_long '1342373888l'
MIXERCONTROL_CT_CLASS_LIST = 1879048192 # Variable c_long '1879048192l'
MIXERCONTROL_CONTROLTYPE_SINGLESELECT = 1879113728 # Variable c_long '1879113728l'
MIXERCONTROL_CONTROLTYPE_MULTIPLESELECT = 1895890944 # Variable c_long '1895890944l'
MIXERCONTROL_CONTROLTYPE_MUX = 1879113729 # Variable c_long '1879113729l'
MIXERCONTROL_CONTROLTYPE_MIXER = 1895890945 # Variable c_long '1895890945l'
MIXERCONTROL_CT_CLASS_METER = 268435456 # Variable c_long '268435456l'
MIXERCONTROL_CONTROLTYPE_BOOLEANMETER = 268500992 # Variable c_long '268500992l'
MIXERCONTROL_CONTROLTYPE_PEAKMETER = 268566529 # Variable c_long '268566529l'
MIXERCONTROL_CONTROLTYPE_SIGNEDMETER = 268566528 # Variable c_long '268566528l'
MIXERCONTROL_CONTROLTYPE_UNSIGNEDMETER = 268632064 # Variable c_long '268632064l'
MIXERCONTROL_CT_CLASS_NUMBER = 805306368 # Variable c_long '805306368l'
MIXERCONTROL_CONTROLTYPE_SIGNED = 805437440 # Variable c_long '805437440l'
MIXERCONTROL_CONTROLTYPE_UNSIGNED = 805502976 # Variable c_long '805502976l'
MIXERCONTROL_CONTROLTYPE_PERCENT = 805634048 # Variable c_long '805634048l'
MIXERCONTROL_CONTROLTYPE_DECIBELS = 805568512 # Variable c_long '805568512l'
MIXERCONTROL_CT_CLASS_SLIDER = 1073741824 # Variable c_long '1073741824l'
MIXERCONTROL_CONTROLTYPE_SLIDER = 1073872896 # Variable c_long '1073872896l'
MIXERCONTROL_CONTROLTYPE_PAN = 1073872897 # Variable c_long '1073872897l'
MIXERCONTROL_CONTROLTYPE_QSOUNDPAN = 1073872898 # Variable c_long '1073872898l'
MIXERCONTROL_CT_CLASS_SWITCH = 536870912 # Variable c_long '536870912l'
MIXERCONTROL_CONTROLTYPE_BOOLEAN = 536936448 # Variable c_long '536936448l'
MIXERCONTROL_CONTROLTYPE_BUTTON = 553713664 # Variable c_long '553713664l'
MIXERCONTROL_CONTROLTYPE_LOUDNESS = 536936452 # Variable c_long '536936452l'
MIXERCONTROL_CONTROLTYPE_MONO = 536936451 # Variable c_long '536936451l'
MIXERCONTROL_CONTROLTYPE_MUTE = 536936450 # Variable c_long '536936450l'
MIXERCONTROL_CONTROLTYPE_ONOFF = 536936449 # Variable c_long '536936449l'
MIXERCONTROL_CONTROLTYPE_STEREOENH = 536936453 # Variable c_long '536936453l'
MIXERCONTROL_CT_CLASS_TIME = 1610612736 # Variable c_long '1610612736l'
MIXERCONTROL_CONTROLTYPE_MICROTIME = 1610809344 # Variable c_long '1610809344l'
MIXERCONTROL_CONTROLTYPE_MILLITIME = 1627586560 # Variable c_long '1627586560l'
MIXERCONTROL_CT_CLASS_CUSTOM = 0 # Variable c_long '0l'
MIXERCONTROL_CONTROLTYPE_CUSTOM = 0 # Variable c_long '0l'
class tMIXERCONTROLDETAILS_UNSIGNED(Structure):
pass
MIXERCONTROLDETAILS_UNSIGNED = tMIXERCONTROLDETAILS_UNSIGNED
tMIXERCONTROLDETAILS_UNSIGNED._pack_ = 1
tMIXERCONTROLDETAILS_UNSIGNED._fields_ = [
('dwValue', DWORD),
]
class tMIXERCONTROLDETAILS_SIGNED(Structure):
pass
MIXERCONTROLDETAILS_SIGNED = tMIXERCONTROLDETAILS_SIGNED
tMIXERCONTROLDETAILS_SIGNED._pack_ = 1
tMIXERCONTROLDETAILS_SIGNED._fields_ = [
('lValue', LONG),
]
class tMIXERCONTROLDETAILS_BOOLEAN(Structure):
pass
MIXERCONTROLDETAILS_BOOLEAN = tMIXERCONTROLDETAILS_BOOLEAN
tMIXERCONTROLDETAILS_BOOLEAN._pack_ = 1
tMIXERCONTROLDETAILS_BOOLEAN._fields_ = [
('fValue', LONG),
]
class tagMIXERCONTROLDETAILS_LISTTEXTW(Structure):
pass
MIXERCONTROLDETAILS_LISTTEXTW = tagMIXERCONTROLDETAILS_LISTTEXTW
MIXERCONTROLDETAILS_LISTTEXT = MIXERCONTROLDETAILS_LISTTEXTW
tagMIXERCONTROLDETAILS_LISTTEXTW._pack_ = 1
tagMIXERCONTROLDETAILS_LISTTEXTW._fields_ = [
('dwParam1', DWORD),
('dwParam2', DWORD),
('szName', WCHAR * 64),
]
MIXERCONTROL_CONTROLF_DISABLED = 2147483648L # Variable c_ulong '-2147483648ul'
MIXERCONTROL_CONTROLF_MULTIPLE = 2 # Variable c_long '2l'
MIXERCONTROL_CONTROLF_UNIFORM = 1 # Variable c_long '1l'
MMSYSERR_NOERROR = 0 # Variable c_int '0'
class tagMIXERCAPSW(Structure):
pass
MIXERCAPSW = tagMIXERCAPSW
MIXERCAPS = MIXERCAPSW
MMVERSION = UINT
tagMIXERCAPSW._pack_ = 1
tagMIXERCAPSW._fields_ = [
('wMid', WORD),
('wPid', WORD),
('vDriverVersion', MMVERSION),
('szPname', WCHAR * 32),
('fdwSupport', DWORD),
('cDestinations', DWORD),
]
class tagMIXERLINEW(Structure):
pass
MIXERLINEW = tagMIXERLINEW
MIXERLINE = MIXERLINEW
class N13tagMIXERLINEW5DOLLAR_112E(Structure):
pass
N13tagMIXERLINEW5DOLLAR_112E._pack_ = 1
N13tagMIXERLINEW5DOLLAR_112E._fields_ = [
('dwType', DWORD),
('dwDeviceID', DWORD),
('wMid', WORD),
('wPid', WORD),
('vDriverVersion', MMVERSION),
('szPname', WCHAR * 32),
]
tagMIXERLINEW._pack_ = 1
tagMIXERLINEW._fields_ = [
('cbStruct', DWORD),
('dwDestination', DWORD),
('dwSource', DWORD),
('dwLineID', DWORD),
('fdwLine', DWORD),
('dwUser', DWORD_PTR),
('dwComponentType', DWORD),
('cChannels', DWORD),
('cConnections', DWORD),
('cControls', DWORD),
('szShortName', WCHAR * 16),
('szName', WCHAR * 64),
('Target', N13tagMIXERLINEW5DOLLAR_112E),
]
class tagMIXERCONTROLW(Structure):
pass
MIXERCONTROLW = tagMIXERCONTROLW
MIXERCONTROL = MIXERCONTROLW
class N16tagMIXERCONTROLW5DOLLAR_117E(Union):
pass
class N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_118E(Structure):
pass
N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_118E._pack_ = 1
N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_118E._fields_ = [
('lMinimum', LONG),
('lMaximum', LONG),
]
class N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_119E(Structure):
pass
N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_119E._pack_ = 1
N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_119E._fields_ = [
('dwMinimum', DWORD),
('dwMaximum', DWORD),
]
N16tagMIXERCONTROLW5DOLLAR_117E._pack_ = 1
N16tagMIXERCONTROLW5DOLLAR_117E._anonymous_ = ['_0', '_1']
N16tagMIXERCONTROLW5DOLLAR_117E._fields_ = [
('_0', N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_118E),
('_1', N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_119E),
('dwReserved', DWORD * 6),
]
class N16tagMIXERCONTROLW5DOLLAR_120E(Union):
pass
N16tagMIXERCONTROLW5DOLLAR_120E._pack_ = 1
N16tagMIXERCONTROLW5DOLLAR_120E._fields_ = [
('cSteps', DWORD),
('cbCustomData', DWORD),
('dwReserved', DWORD * 6),
]
tagMIXERCONTROLW._pack_ = 1
tagMIXERCONTROLW._fields_ = [
('cbStruct', DWORD),
('dwControlID', DWORD),
('dwControlType', DWORD),
('fdwControl', DWORD),
('cMultipleItems', DWORD),
('szShortName', WCHAR * 16),
('szName', WCHAR * 64),
('Bounds', N16tagMIXERCONTROLW5DOLLAR_117E),
('Metrics', N16tagMIXERCONTROLW5DOLLAR_120E),
]
class tagMIXERLINECONTROLSW(Structure):
pass
MIXERLINECONTROLSW = tagMIXERLINECONTROLSW
MIXERLINECONTROLS = MIXERLINECONTROLSW
class N21tagMIXERLINECONTROLSW5DOLLAR_122E(Union):
pass
N21tagMIXERLINECONTROLSW5DOLLAR_122E._pack_ = 1
N21tagMIXERLINECONTROLSW5DOLLAR_122E._fields_ = [
('dwControlID', DWORD),
('dwControlType', DWORD),
]
LPMIXERCONTROLW = POINTER(tagMIXERCONTROLW)
tagMIXERLINECONTROLSW._pack_ = 1
tagMIXERLINECONTROLSW._anonymous_ = ['_0']
tagMIXERLINECONTROLSW._fields_ = [
('cbStruct', DWORD),
('dwLineID', DWORD),
('_0', N21tagMIXERLINECONTROLSW5DOLLAR_122E),
('cControls', DWORD),
('cbmxctrl', DWORD),
('pamxctrl', LPMIXERCONTROLW),
]
class tMIXERCONTROLDETAILS(Structure):
pass
MIXERCONTROLDETAILS = tMIXERCONTROLDETAILS
class N20tMIXERCONTROLDETAILS5DOLLAR_123E(Union):
pass
N20tMIXERCONTROLDETAILS5DOLLAR_123E._pack_ = 1
N20tMIXERCONTROLDETAILS5DOLLAR_123E._fields_ = [
('hwndOwner', HWND),
('cMultipleItems', DWORD),
]
tMIXERCONTROLDETAILS._pack_ = 1
tMIXERCONTROLDETAILS._anonymous_ = ['_0']
tMIXERCONTROLDETAILS._fields_ = [
('cbStruct', DWORD),
('dwControlID', DWORD),
('cChannels', DWORD),
('_0', N20tMIXERCONTROLDETAILS5DOLLAR_123E),
('cbDetails', DWORD),
('paDetails', LPVOID),
]
HMIXER = HANDLE
MMRESULT = UINT
LPHMIXER = POINTER(HMIXER)
mixerOpen = _Winmm.mixerOpen
mixerOpen.restype = MMRESULT
mixerOpen.argtypes = [LPHMIXER, UINT, DWORD_PTR, DWORD_PTR, DWORD]
LPMIXERCAPSW = POINTER(tagMIXERCAPSW)
mixerGetDevCapsW = _Winmm.mixerGetDevCapsW
mixerGetDevCapsW.restype = MMRESULT
mixerGetDevCapsW.argtypes = [UINT_PTR, LPMIXERCAPSW, UINT]
mixerGetDevCaps = mixerGetDevCapsW # alias
HMIXEROBJ = HANDLE
LPMIXERLINEW = POINTER(tagMIXERLINEW)
mixerGetLineInfoW = _Winmm.mixerGetLineInfoW
mixerGetLineInfoW.restype = MMRESULT
mixerGetLineInfoW.argtypes = [HMIXEROBJ, LPMIXERLINEW, DWORD]
mixerGetLineInfo = mixerGetLineInfoW # alias
LPMIXERLINECONTROLSW = POINTER(tagMIXERLINECONTROLSW)
mixerGetLineControlsW = _Winmm.mixerGetLineControlsW
mixerGetLineControlsW.restype = MMRESULT
mixerGetLineControlsW.argtypes = [HMIXEROBJ, LPMIXERLINECONTROLSW, DWORD]
mixerGetLineControls = mixerGetLineControlsW # alias
LPMIXERCONTROLDETAILS = POINTER(tMIXERCONTROLDETAILS)
mixerGetControlDetailsW = _Winmm.mixerGetControlDetailsW
mixerGetControlDetailsW.restype = MMRESULT
mixerGetControlDetailsW.argtypes = [HMIXEROBJ, LPMIXERCONTROLDETAILS, DWORD]
mixerGetControlDetails = mixerGetControlDetailsW # alias
MIXER_GETLINEINFOF_DESTINATION = 0 # Variable c_long '0l'
MIXER_GETLINEINFOF_SOURCE = 1 # Variable c_long '1l'
MIXER_GETLINECONTROLSF_ALL = 0 # Variable c_long '0l'
MIXER_GETLINECONTROLSF_ONEBYID = 1 # Variable c_long '1l'
MIXER_GETCONTROLDETAILSF_VALUE = 0 # Variable c_long '0l'
MIXER_GETCONTROLDETAILSF_LISTTEXT = 1 # Variable c_long '1l'
mixerGetNumDevs = _Winmm.mixerGetNumDevs
mixerGetNumDevs.restype = UINT
mixerGetNumDevs.argtypes = []
mixerSetControlDetails = _Winmm.mixerSetControlDetails
mixerSetControlDetails.restype = MMRESULT
mixerSetControlDetails.argtypes = [HMIXEROBJ, LPMIXERCONTROLDETAILS, DWORD]
MIXERLINE_COMPONENTTYPE_DST_SPEAKERS = 4 # Variable c_long '4l'
MIXER_GETLINEINFOF_COMPONENTTYPE = 3 # Variable c_long '3l'
MIXER_GETLINECONTROLSF_ONEBYTYPE = 2 # Variable c_long '2l'
| gpl-2.0 | -8,111,707,828,437,590,000 | 38.973333 | 85 | 0.737241 | false |
decarboxy/py_protein_utils | rosettautil/bcl/file_formats.py | 1 | 1538 | from rosettautil.util import fileutil
import sys
class list_of_2D_vectors:
def __init__(self):
self.records = []
def add_record(self, first_col,second_col):
self.records.append((first_col,second_col))
def write_bcl_file(self,path):
out_file = fileutil.universal_open(path,'w')
list_header ="bcl::storage::List<bcl::storage::VectorND2<bcl::math::Vector<double>>>"
vector_header = "bcl::storage::VectorND2<bcl::math::Vector<double>>"
double_header = "bcl::math::Vector<double>"
out_file.write(list_header+"\n")
out_file.write(str(len(self.records))+"\n")
for first_col, second_col in self.records:
out_file.write(vector_header+"\n")
out_file.write(double_header+"\n")
out_file.write(str(1)+"\n")
out_file.write(str(first_col)+"\n")
out_file.write(double_header+"\n")
out_file.write(str(1)+"\n")
out_file.write(str(second_col)+"\n")
out_file.close()
def read_bcl_file(self,path):
print "This function doesn't work yet"
sys.exit()
out_file = fileutil.universal_open(path,'r')
list_header ="bcl::storage::List<bcl::storage::VectorND2<bcl::math::Vector<double>>>"
vector_header = "bcl::storage::VectorND2<bcl::math::Vector<double>>"
double_header = "bcl::math::Vector<double>"
list_scope = False
vector_scope = False
double_scope = False | mit | 748,329,537,673,460,700 | 37.475 | 93 | 0.576723 | false |
hkff/AccLab | pyAAL/FOTLOperators.py | 1 | 1135 | """
FOTLOperators
Copyright (C) 2014 Walid Benghabrit
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'walid'
from enum import Enum
# FOTL operators
class FOTLOperators(Enum):
"""
Fotl operators in tspass syntax
"""
t_equal = '='
t_not = '~'
t_and = '&'
t_or = '|'
t_implication = '=>'
t_equivalence = '<=>'
t_forall = '!'
t_exists = '?'
t_always = 'always'
t_next = 'next'
t_sometime = 'sometime'
t_until = 'until'
t_unless = 'unless'
def __str__(self):
return self.value
| gpl-3.0 | -7,398,909,174,859,723,000 | 25.395349 | 69 | 0.671366 | false |
openstack/os-vif | os_vif/tests/functional/base.py | 1 | 5005 | # Derived from: neutron/tests/functional/base.py
# neutron/tests/base.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
import inspect
import os
import sys
import eventlet.timeout
from os_vif import version as osvif_version
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslotest import base
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _get_test_log_path():
return os.environ.get('OS_LOG_PATH', '/tmp')
# This is the directory from which infra fetches log files for functional tests
DEFAULT_LOG_DIR = os.path.join(_get_test_log_path(), 'osvif-functional-logs')
def wait_until_true(predicate, timeout=15, sleep=1):
"""Wait until callable predicate is evaluated as True
:param predicate: Callable deciding whether waiting should continue.
Best practice is to instantiate predicate with
``functools.partial()``.
:param timeout: Timeout in seconds how long should function wait.
:param sleep: Polling interval for results in seconds.
:return: True if the predicate is evaluated as True within the timeout,
False in case of timeout evaluating the predicate.
"""
try:
with eventlet.Timeout(timeout):
while not predicate():
eventlet.sleep(sleep)
except eventlet.Timeout:
return False
return True
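# Illustrative usage sketch (added for clarity; not part of the original
# module and not called anywhere). As the docstring suggests, the predicate is
# bound with functools.partial; the marker path below is a made-up example.
def _example_wait_for_marker(path='/tmp/osvif-functional-marker'):
    return wait_until_true(functools.partial(os.path.exists, path),
                           timeout=30, sleep=0.5)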
class _CatchTimeoutMetaclass(abc.ABCMeta):
def __init__(cls, name, bases, dct):
super(_CatchTimeoutMetaclass, cls).__init__(name, bases, dct)
for name, method in inspect.getmembers(
# NOTE(ihrachys): we should use isroutine because it will catch
# both unbound methods (python2) and functions (python3)
cls, predicate=inspect.isroutine):
if name.startswith('test_'):
setattr(cls, name, cls._catch_timeout(method))
@staticmethod
def _catch_timeout(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except eventlet.Timeout as e:
self.fail('Execution of this test timed out: %s' % e)
return func
def setup_logging(component_name):
"""Sets up the logging options for a log with supplied name."""
logging.setup(cfg.CONF, component_name)
LOG.info("Logging enabled!")
LOG.info("%(prog)s version %(version)s",
{'prog': sys.argv[0], 'version': osvif_version.__version__})
LOG.debug("command line: %s", " ".join(sys.argv))
def sanitize_log_path(path):
"""Sanitize the string so that its log path is shell friendly"""
return path.replace(' ', '-').replace('(', '_').replace(')', '_')
# Test worker cannot survive eventlet's Timeout exception, which effectively
# kills the whole worker, with all test cases scheduled to it. This metaclass
# makes all test cases convert Timeout exceptions into unittest friendly
# failure mode (self.fail).
class BaseFunctionalTestCase(base.BaseTestCase,
metaclass=_CatchTimeoutMetaclass):
"""Base class for functional tests."""
COMPONENT_NAME = 'os_vif'
PRIVILEGED_GROUP = 'os_vif_privileged'
def setUp(self):
super(BaseFunctionalTestCase, self).setUp()
logging.register_options(CONF)
setup_logging(self.COMPONENT_NAME)
fileutils.ensure_tree(DEFAULT_LOG_DIR, mode=0o755)
log_file = sanitize_log_path(
os.path.join(DEFAULT_LOG_DIR, "%s.txt" % self.id()))
self.flags(log_file=log_file)
privsep_helper = os.path.join(
os.getenv('VIRTUAL_ENV', os.path.dirname(sys.executable)[:-4]),
'bin', 'privsep-helper')
self.flags(
helper_command=' '.join(['sudo', '-E', privsep_helper]),
group=self.PRIVILEGED_GROUP)
def flags(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in kw.items():
CONF.set_override(k, v, group)
| apache-2.0 | -1,166,483,759,752,645,000 | 35.532847 | 79 | 0.650749 | false |
pbrandebura/Task1 | fixture/contact.py | 1 | 4977 | from model.contact import Contact
class ContactHelper:
def __init__(self, app):
self.app = app
def back_to_homepage(self):
wd = self.app.wd
if not len(wd.find_elements_by_name("Send e-Mail")) > 0:
wd.find_element_by_link_text("home").click()
def proceed_to_newuser_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/edit.php") and len(wd.find_elements_by_name("Enter")) > 0):
wd.find_element_by_link_text("add new").click()
def add_new_user(self, contact):
wd = self.app.wd
# add new contact
self.proceed_to_newuser_page()
# enter details
self.entering_details(contact)
# submit
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.back_to_homepage()
self.contact_cache = None
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
    def edit_first_user(self, contact):
        self.edit_user_by_index(0, contact)
def edit_user_by_index(self, index, contact):
wd = self.app.wd
self.back_to_homepage()
        # edit existing contact
self.select_contact_by_index(index)
wd.find_element_by_xpath("//*[@id='maintable']/tbody/tr["+str(index+2)+"]/td[8]/a/img").click()
# enter details
self.entering_details(contact)
# submit
wd.find_element_by_name("update").click()
self.back_to_homepage()
self.contact_cache = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.select_contact_by_index(index)
wd.find_element_by_xpath("//*[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def entering_details(self, contact):
wd = self.app.wd
self.type("firstname", contact.firstname)
self.type("middlename", contact.middlename)
self.type("lastname", contact.lastname)
self.type("nickname", contact.nickname)
self.type("title", contact.usertitle)
self.type("company", contact.company)
self.type("address", contact.userAddress)
self.type("home", contact.homeNumber)
self.type("mobile", contact.mobileNumber)
self.type("work", contact.workNumber)
self.type("fax", contact.faxNumber)
self.type("email", contact.userEmail)
self.type("email2", contact.userEmail2)
self.type("email3", contact.userEmail3)
self.type("homepage", contact.userHomepage)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[%s]" % contact.bday).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[%s]" % contact.bday).click()
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[2]//option[%s]" % contact.bmonth).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[%s]" % contact.bmonth).click()
self.type("byear", contact.byear)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[%s]" % contact.aday).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[%s]" % contact.aday).click()
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[4]//option[%s]" % contact.amonth).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[%s]" % contact.amonth).click()
self.type("ayear", contact.ayear)
self.type("address2", contact.userAddress2)
self.type("phone2", contact.userPhone2)
self.type("notes", contact.userNotes)
def type(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def count(self):
wd = self.app.wd
self.back_to_homepage()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.back_to_homepage()
self.contact_cache = []
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
last_name = cells[1].text
first_name = cells[2].text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.contact_cache.append(Contact(firstname=first_name, lastname=last_name, id=id))
return list(self.contact_cache) | apache-2.0 | 7,543,241,541,158,560,000 | 40.831933 | 120 | 0.593731 | false |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Google/Spreadsheets/StructuredQuery.py | 1 | 6318 | # -*- coding: utf-8 -*-
###############################################################################
#
# StructuredQuery
# Retrieves a list-based feed containing data in your Google spreadsheet that meets a specified criteria.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class StructuredQuery(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the StructuredQuery Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(StructuredQuery, self).__init__(temboo_session, '/Library/Google/Spreadsheets/StructuredQuery')
def new_input_set(self):
return StructuredQueryInputSet()
def _make_result_set(self, result, path):
return StructuredQueryResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return StructuredQueryChoreographyExecution(session, exec_id, path)
class StructuredQueryInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the StructuredQuery
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid Access Token retrieved during the OAuth process. This is required when authenticating with OAuth unless providing the ClientID, ClientSecret, and RefreshToken.)
"""
super(StructuredQueryInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((optional, string) The Client ID provided by Google. Required when authenticating with OAuth unless providing a valid AccessToken.)
"""
super(StructuredQueryInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((optional, string) The Client Secret provided by Google. Required when authenticating with OAuth unless providing a valid AccessToken.)
"""
super(StructuredQueryInputSet, self)._set_input('ClientSecret', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) A Google App-specific password that you've generated after enabling 2-Step Verification. See Optional Inputs for OAuth.)
"""
super(StructuredQueryInputSet, self)._set_input('Password', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((required, string) A valid structured query (i.e. id>4).)
"""
super(StructuredQueryInputSet, self)._set_input('Query', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((optional, string) An OAuth Refresh Token used to generate a new Access Token when the original token is expired. Required when authenticating with OAuth unless providing a valid AccessToken.)
"""
super(StructuredQueryInputSet, self)._set_input('RefreshToken', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml (the default) and json.)
"""
super(StructuredQueryInputSet, self)._set_input('ResponseFormat', value)
def set_SpreadsheetKey(self, value):
"""
Set the value of the SpreadsheetKey input for this Choreo. ((required, string) The unique key of the spreadsheet associated with the feed you want to retrieve.)
"""
super(StructuredQueryInputSet, self)._set_input('SpreadsheetKey', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((required, string) Your full Google email address e.g., [email protected]. See Optional Inputs for OAuth.)
"""
super(StructuredQueryInputSet, self)._set_input('Username', value)
def set_WorksheetId(self, value):
"""
Set the value of the WorksheetId input for this Choreo. ((required, string) The unique ID of the worksheet associated with the feed you want to retrieve.)
"""
super(StructuredQueryInputSet, self)._set_input('WorksheetId', value)
class StructuredQueryResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the StructuredQuery Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class StructuredQueryChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return StructuredQueryResultSet(response, path)
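# Illustrative usage sketch (added for clarity; not part of the generated
# Temboo binding). Credentials and spreadsheet identifiers are placeholders,
# and the session import/constructor follow the usual Temboo Python SDK
# conventions -- adjust to your installation if they differ.
#
#     from temboo.core.session import TembooSession
#
#     session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#     choreo = StructuredQuery(session)
#     inputs = choreo.new_input_set()
#     inputs.set_Username('[email protected]')
#     inputs.set_Password('app-specific-password')
#     inputs.set_SpreadsheetKey('SPREADSHEET_KEY')
#     inputs.set_WorksheetId('WORKSHEET_ID')
#     inputs.set_Query('id>4')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())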
| gpl-2.0 | -4,851,840,760,448,898,000 | 46.863636 | 258 | 0.687085 | false |
quantumlib/Cirq | cirq-google/cirq_google/calibration/xeb_wrapper_test.py | 1 | 4677 | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing.pool
import numpy as np
import pandas as pd
import pytest
import scipy.optimize
import scipy.optimize._minimize
import cirq
import cirq_google as cg
from cirq.experiments import random_rotations_between_grid_interaction_layers_circuit
from cirq.experiments.xeb_fitting import XEBPhasedFSimCharacterizationOptions
from cirq_google.calibration.phased_fsim import (
LocalXEBPhasedFSimCalibrationOptions,
LocalXEBPhasedFSimCalibrationRequest,
)
from cirq_google.calibration.xeb_wrapper import (
run_local_xeb_calibration,
_maybe_multiprocessing_pool,
)
SQRT_ISWAP = cirq.ISWAP ** -0.5
def _minimize_patch(
fun,
x0,
args=(),
method=None,
jac=None,
hess=None,
hessp=None,
bounds=None,
constraints=(),
tol=None,
callback=None,
options=None,
x0_should_be=None,
):
assert method == 'nelder-mead'
np.testing.assert_allclose(x0_should_be, x0)
return scipy.optimize.OptimizeResult(
fun=0,
nit=0,
nfev=0,
status=0,
success=True,
message='monkeypatched',
x=x0.copy(),
final_simplex=None,
)
def _benchmark_patch(*args, **kwargs):
return pd.DataFrame()
@pytest.mark.parametrize(
['fsim_options', 'x0_should_be'],
[
(
XEBPhasedFSimCharacterizationOptions(
characterize_zeta=True,
characterize_gamma=True,
characterize_chi=True,
characterize_theta=False,
characterize_phi=False,
),
[0.0, 0.0, 0.0],
),
(XEBPhasedFSimCharacterizationOptions(), [np.pi / 4, 0.0, 0.0, 0.0, 0.0]),
(
XEBPhasedFSimCharacterizationOptions(
characterize_zeta=True,
characterize_chi=True,
characterize_gamma=True,
characterize_theta=False,
characterize_phi=False,
theta_default=99,
zeta_default=0.1,
chi_default=0.2,
gamma_default=0.3,
phi_default=99,
),
[0.1, 0.2, 0.3],
),
],
)
def test_run_calibration(monkeypatch, fsim_options, x0_should_be):
def _minimize_patch_2(*args, **kwargs):
return _minimize_patch(*args, **kwargs, x0_should_be=x0_should_be)
monkeypatch.setattr('cirq.experiments.xeb_fitting.scipy.optimize.minimize', _minimize_patch_2)
monkeypatch.setattr(
'cirq_google.calibration.xeb_wrapper.xebf.benchmark_2q_xeb_fidelities', _benchmark_patch
)
qubit_indices = [
(0, 5),
(0, 6),
(1, 6),
(2, 6),
]
qubits = [cirq.GridQubit(*idx) for idx in qubit_indices]
sampler = cirq.ZerosSampler()
circuits = [
random_rotations_between_grid_interaction_layers_circuit(
qubits,
depth=depth,
two_qubit_op_factory=lambda a, b, _: SQRT_ISWAP.on(a, b),
pattern=cirq.experiments.GRID_ALIGNED_PATTERN,
seed=10,
)
for depth in [5, 10]
]
options = LocalXEBPhasedFSimCalibrationOptions(
fsim_options=fsim_options,
n_processes=1,
)
characterization_requests = []
for circuit in circuits:
_, characterization_requests = cg.prepare_characterization_for_moments(
circuit, options=options, initial=characterization_requests
)
assert len(characterization_requests) == 2
for cr in characterization_requests:
assert isinstance(cr, LocalXEBPhasedFSimCalibrationRequest)
characterizations = [
run_local_xeb_calibration(request, sampler) for request in characterization_requests
]
final_params = dict()
for c in characterizations:
final_params.update(c.parameters)
assert len(final_params) == 3 # pairs
def test_maybe_pool():
with _maybe_multiprocessing_pool(1) as pool:
assert pool is None
with _maybe_multiprocessing_pool(2) as pool:
assert isinstance(pool, multiprocessing.pool.Pool)
| apache-2.0 | -5,539,152,799,729,527,000 | 28.049689 | 98 | 0.633526 | false |
hkff/AccLab | pyAAL/ui/api.py | 1 | 22847 | """
Server API
Copyright (C) 2014 Walid Benghabrit
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from threading import Thread
from time import sleep
try:
from fodtlmon.fodtl.fodtlmon import *
except:
pass
__author__ = 'walid'
import os
from urllib.parse import *
import sys, shutil
from io import StringIO
from aalc import *
from AALtoAccmon import *
import json
base_dir = "examples"
ALLOWED_CMD = ["tspass", "aalc.py", "fotl-translate", "manage.py"]
# Filter ps
def is_cmd_allowed(cmds):
for x in ALLOWED_CMD:
if x in cmds:
return True
return False
# List dir
def api_list_dir(wpath):
tmp = "["
dirs = sorted(os.listdir(wpath)[::-1])
for d in dirs:
if d.startswith("."):
continue
        tmp += '{' + '"id":"' + wpath + '/' + d + '", "text":"' + d + '", "iconCls":""'
if os.path.isdir(wpath + "/" + d):
tmp += ',"children": '
tmp += api_list_dir(wpath + '/' + d)
tmp += '},'
if tmp[-1] == ",":
tmp = tmp[:-1]
tmp += ']'
return tmp
# Read file
def api_read_file(f):
with open(base_dir + "/" + f) as fd:
return fd.read()
# Get template
def api_get_template(f):
with open(f) as fd:
return fd.read()
# Write file
def api_write_file(f, d):
res = -1
creation = False
# Add \n at the end
if d[-1] != "\n":
d += "\n"
if not os.path.isfile(base_dir + "/" + f):
creation = True
with open(base_dir + "/" + f, "w+") as fd:
res = str(fd.write(d))
check_aal_acd(f)
if creation:
Popen(['svn', "add", base_dir + "/" + f]).wait()
Popen(['svn', "commit", base_dir + "/", "-m", "'Add file %s'" % f]).wait()
else:
Popen(['svn', "commit", base_dir + "/", "-m", "'Edit file %s'" % f]).wait()
return res
# Rename file
def api_rename_file(f, new_name):
# TODO svn
os.rename(base_dir + "/" + f, base_dir + "/" + new_name)
return "RENAMED"
# Delete file
def api_delete_file(f):
file = base_dir + "/" + f
if os.path.isfile(file):
os.remove(file)
Popen(['svn', "del", file]).wait()
Popen(['svn', "commit", base_dir + "/", "-m", "'Delete file %s'" % f]).wait()
elif os.path.isdir(file):
shutil.rmtree(file)
Popen(['svn', "del", file]).wait()
Popen(['svn', "commit", base_dir + "/", "-m", "'Delete folder %s'" % f]).wait()
return "DELETED"
# Save preferences
def api_save_prefs(d):
# Add \n at the end
if d[-1] != "\n":
d += "\n"
with open("ui/prefs.json", "w+") as fd:
return str(fd.write(d))
# Load preferences
def api_load_prefs():
if not os.path.isfile("ui/prefs.json"):
api_save_prefs('{"theme": "monokai", "username": "", "fontSize": 14, "recentFiles": [] }')
with open("ui/prefs.json") as fd:
return fd.read()
# Create Folder
def api_create_folder(d):
if not os.path.exists(base_dir + "/" + d):
res = str(os.makedirs(base_dir + "/" + d))
Popen(['svn', "add", base_dir + "/" + d]).wait()
Popen(['svn', "commit", base_dir + "/", "-m", "'Add folder %s'" % d]).wait()
return res
else:
return "Directory exists !"
# Convert terminal colors to colored div
def to_html_colors(html_code: str):
html_code = html_code.replace("[91m[ERROR]", "<b style='color:red;'><span class='fa fa-times-circle' "
"style='padding-top: 2px;padding-right: 5px;'/>[ERROR]")
html_code = html_code.replace("[93m[WARNING]", "<b style='color:orange;'><span class='fa fa-exclamation-triangle'"
" style='padding-top: 2px;padding-right: 5px;'/>[WARNING]")
html_code = html_code.replace("[95mat line", "<b style='color:magenta; text-decoration: underline; "
"cursor: pointer;' class='aceLine'>at line")
html_code = html_code.replace("[91m", "<b style='color:red;'><span class='' style='padding-top: 2px;'/>")
html_code = html_code.replace("[93m", "<b style='color:orange;'><span class='' style='padding-top: 2px;'/>")
html_code = html_code.replace("[92m", "<b style='color:green;'><span class='fa fa-exclamation-triangles' "
"style='padding-top: 2px;'/>")
html_code = html_code.replace("[95m", "<b style='color:magenta;'><span class='fa fa-exclamation-triangles' "
"style='padding-top: 2px;'/>")
html_code = html_code.replace("[94m", "<b style='color:blue;'><span class='' style='padding-top: 2px;'/>")
html_code = html_code.replace("[39m", "</b>")
html_code = html_code.replace("<<", "<<")
html_code = html_code.replace(">>", ">>")
return html_code
# Compile AAL
def api_compile_aal(f):
# Save current context
sysout = sys.stdout
syserr = sys.stderr
# Capture the output
reportSIO = StringIO()
reportEIO = StringIO()
sys.stdout = reportSIO
sys.stderr = reportEIO
res = ""
try:
aalc(base_dir + "/" + f, libs_path="libs/aal/", root_path="", web=True)
except Exception as e:
res = "Compilation Error : " + str(e)
res = reportSIO.getvalue() + "\n" + reportEIO.getvalue()
# Restore context
sys.stdout = sysout
sys.stderr = syserr
print(res)
res = to_html_colors(res)
return res.replace("\n", "<br>")
# Compile tspass
def api_compile_tspass(f):
# Save current context
sysout = sys.stdout
syserr = sys.stderr
# Capture the output
reportSIO = StringIO()
reportEIO = StringIO()
sys.stdout = reportSIO
sys.stderr = reportEIO
try:
res = tspassc(file=base_dir + "/" + f, output="tmp.tspass")["print"]
except Exception as e:
res = "Compilation Error : " + str(e)
res += "\n" + reportSIO.getvalue() + "\n" + reportEIO.getvalue()
print(res)
# Restore context
sys.stdout = sysout
sys.stderr = syserr
return res.replace("\n", "<br>")
# Compile ACD
def api_compile_acd(aal, spec):
result = {"compliance": [], "sat": [], "error": ""}
tmp_file = "_tmp0001_.aal"
res = ""
try:
# Save current context
sysout = sys.stdout
syserr = sys.stderr
# Capture the output
reportSIO = StringIO()
reportEIO = StringIO()
sys.stdout = reportSIO
sys.stderr = reportEIO
api_write_file(tmp_file, aal)
res = aalc(base_dir + "/" + tmp_file, libs_path="libs/aal/", root_path="", web=False)
# Handling Sat
for c in res["mm"].aalprog.clauses:
clause = str(c.name)
tmp = validate2(res["mm"], "(always (" + c.usage.to_ltl() + "))", check=True)
result["sat"].append({clause: tmp["sat"]})
# Handling Compliance
specs = spec.split(";")
for x in specs:
x = x.strip()
sp = x.split("->")
if len(sp) == 2:
_c1 = res["mm"].clause(sp[0].strip())
_c2 = res["mm"].clause(sp[1].strip())
tmp = validate(res["mm"], _c1, _c2, resolve=False, verbose=False, use_always=False, acc_formula=0, chk='neg')
result["compliance"].append({x: tmp["ok"]})
res = reportSIO.getvalue() + "\n" + reportEIO.getvalue()
# Restore context
sys.stdout = sysout
sys.stderr = syserr
except Exception as e:
result["error"] += "\nCompilation Error : " + str(e) + "\n"
finally:
result["error"] += res
result["error"] = to_html_colors(result["error"].replace("\n", "</br>"))
api_delete_file(tmp_file)
return json.dumps(result)
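# Note (added for clarity): `spec` is a ';'-separated list of compliance
# checks of the form "clauseA -> clauseB"; each pair is checked with
# validate(), and every clause of the compiled program is also checked for
# satisfiability. Hypothetical example call:
#   api_compile_acd(aal_source, "usage_policy -> provider_policy; c1 -> c2")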
# Get AAL declaration in JSON format
def api_get_aal_dec(f):
try:
mm = aalc(base_dir + "/" + f, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
except:
# Compilation Error
return '{"agents" : [], "services" : [], "types" : [], "data" : [], "clauses" : [], "dataTypes" : [], "actorTypes" : []}'
agents = ",".join(mm.get_declared(dtype="agents"))
services = ",".join(mm.get_declared(dtype="services"))
data = ",".join(mm.get_declared(dtype="data"))
tts = mm.aalprog.get_declared(m_type)
types = ",".join(mm.get_declared(dtype="types"))
# Filter by data type / actor type
actorTypes = ",".join(['"' + str(x.name) + '"' for x in list(filter(lambda x: x.subtype_of("Actor"), tts))])
dataTypes = ",".join(['"' + str(x.name) + '"' for x in list(filter(lambda x: x.subtype_of("data"), tts))])
clauses = ",".join(['"' + str(x.name) + '"' for x in mm.aalprog.clauses])
res = '{"agents" : [' + agents + '], "services" : [' + services + '], "types" : [' + \
types + '], "data" : [' + data + '], "clauses" : [' + clauses + ']' + ', "dataTypes" : [' +\
dataTypes + ']' + ', "actorTypes" : [' + actorTypes + ']' + '}'
return res
# Get ps info
def api_monitor():
# ps -a -o user,pid,%cpu,%mem,start,time,command
p = Popen(['ps', '-aL', '-o', 'user,pid,%cpu,%mem,time,command'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
sts = p.stdout.read().decode("utf-8")
sts = sts.split("\n")
sts2 = [' '.join(x.split()) for x in sts][1:]
pss = ""
for x in sts2:
x = x.split(" ")
if len(x) >= 5:
cmd = ' '.join(x[5:])
if is_cmd_allowed(cmd):
pss += (
"{"
" \"user\": \"" + x[0] + "\","
" \"pid\" : \"" + x[1] + "\","
" \"cpu\" : \"" + x[2] + "\","
" \"mem\" : \"" + x[3] + "\","
" \"time\": \"" + x[4] + "\","
" \"cmd\" : \"" + cmd + "\" "
"},"
)
pss = pss[:-1]
json = "{\"ps\" : [ " + pss + " ]}"
return json
# kill process by id
def api_kill_ps(pid):
p = Popen(['kill', '-KILL', pid], stdout=PIPE, stderr=PIPE, stdin=PIPE)
return p.stdout.read().decode("utf-8")
# Macro API
def api_macro_call(f, macro_name, macro_args):
res = ""
try:
res = aalc(base_dir + "/" + f, libs_path="libs/aal/", root_path="", web=True)
# Save current context
sysout = sys.stdout
syserr = sys.stderr
# Capture the output
reportSIO = StringIO()
reportEIO = StringIO()
sys.stdout = reportSIO
sys.stderr = reportEIO
res["mm"].call(macro_name, macro_args[1:-1].split(','))
res = reportSIO.getvalue() + "\n" + reportEIO.getvalue()
# Restore context
sys.stdout = sysout
sys.stderr = syserr
except Exception as e:
res = "Compilation Error : " + str(e)
print(res)
res = to_html_colors(res)
return res.replace("\n", "<br>")
# Gen accmon
def api_gen_accmon(file, spec):
try:
mspec = MappingSpec()
tmp = spec.split(";")
for x in tmp:
tmp2 = x.split(":")
if len(tmp2) > 1:
args = tmp2[1].split("=>")
if tmp2[0] == "clause":
if len(args) > 2:
mspec.clauses.append(MappingSpec.ClauseMap(args[0], args[1], args[2]))
elif tmp2[0] == "service":
if len(args) > 1:
mspec.services.append(MappingSpec.ServiceMap(args[0], args[1]))
elif tmp2[0] == "agent":
if len(args) > 1:
mspec.agents.append(MappingSpec.AgentMap(args[0], args[1]))
elif tmp2[0] == "type":
if len(args) > 1:
mspec.types.append(MappingSpec.TypeMap(args[0], args[1]))
mm = aalc(base_dir + "/" + file, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
res = AALtoDJFODTLMON(mm, mspec)
file_name = file.replace('.aal', '_rules.py')
api_write_file(file_name, res)
return file_name
except:
# Compilation Error
return 'Error'
# Generate django app skeleton
def api_generate_django(aal_file, spec_file, output_folder):
return generate_django_skeleton(aal_file, spec_file, output_folder)
# Run django app
def api_run_django(app, port=9000):
p = Popen(['python3', base_dir + "/"+app, 'migrate'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
# p = Popen(['python3', base_dir + "/"+app, 'runserver', str(port)], stdout=PIPE, stderr=PIPE, stdin=PIPE)
# IMPORTANT: Run the server using non blocking IO in order to capture errors and show them to the client
from queue import Queue, Empty
ON_POSIX = 'posix' in sys.builtin_module_names
def enqueue_output(out, err, queue):
for line in iter(out.readline, b''):
queue.put(line.decode("utf-8"))
out.close()
for line in iter(err.readline, b''):
queue.put(line.decode("utf-8"))
err.close()
p = Popen(['python3', base_dir + "/"+app, 'runserver', str(port)], stdout=PIPE, stderr=PIPE, stdin=PIPE,
bufsize=1, close_fds=ON_POSIX)
q = Queue()
t = Thread(target=enqueue_output, args=(p.stdout, p.stderr, q))
t.daemon = True
t.start()
# Wait to get some data
sleep(5)
# Get output
items = []
max = 100
for numOfItemsRetrieved in range(0, max):
try:
if numOfItemsRetrieved == max:
break
items.append(q.get_nowait())
except Exception:
break
print("=====================================")
print("".join(items))
print("=====================================")
return "".join(items).replace("\n", "<br>")
# Convert Fodtl formula to vFodtl diagram
def api_fodtl_to_vfodtl(formula):
print(formula)
try:
from fodtlmon.parser.Parser import FodtlParser
except:
return "fodtlmon is not installed !"
try:
def prg(formula):
res = ""
js_class = "Fodtl_%s" % formula.__class__.__name__.lower()
if isinstance(formula, Predicate):
arguments = []
for x in formula.args:
arguments.append(prg(x))
res = '{ "%s": [%s] }' % (js_class, ",".join(arguments))
elif isinstance(formula, Constant):
res = '{ "%s": {"Fodtl_value": "%s"} }' % (js_class, formula.name)
elif isinstance(formula, Variable):
res = '{ "%s": {"Fodtl_value": "%s"} }' % (js_class, formula.name)
elif isinstance(formula, At):
pass
elif isinstance(formula, Forall):
pass
elif isinstance(formula, Exists):
pass
elif isinstance(formula, true) or isinstance(formula, false):
res = '{ "%s": "" }' % js_class
elif isinstance(formula, UExp):
inner = prg(formula.inner)
res = '{"%s" : %s}' % (js_class, inner)
elif isinstance(formula, BExp):
exp1 = prg(formula.left)
exp2 = prg(formula.right)
res = '{ "%s" : [%s, %s] }' % (js_class, exp1, exp2)
else:
raise Exception("Error %s of type %s" % (formula, type(formula)))
return res
f = FodtlParser.parse(formula)
res = prg(f)
return res
except Exception as e:
return "%s" % e
# Register formula in accmon
def api_register_accmon_monitor(formula, mon_name, accmon_url):
import urllib.request, urllib.parse
res = "Error"
values = {'formula_id': mon_name, 'formula': formula}
data = urllib.parse.urlencode(values)
data = data.encode('ascii') # data should be bytes
url = accmon_url + "/sysmon/remote/register_formula/"
with urllib.request.urlopen(url, data) as response:
res = str(response.read())
print(res)
return res
# Transform a clause into Fodtl formula
def api_clause_to_fodtl(file, clause):
res = "Error"
mm = aalc(base_dir + "/" + file, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
if mm is not None:
c = mm.clause(clause)
if c is not None:
res = aal_clause_to_fodtl(c)
return res
# Check and update the corresponding acd/aal file
def check_aal_acd(file):
def get_clause_node_from_json(acd, clause):
for x in acd:
if x["type"] == "PolicyUI":
c_name = re.search(r'CLAUSE \w+', x["policy"]).group(0).replace("CLAUSE ", "")
if c_name == clause:
return x
return None
ext = file.split(".")[-1]
if ext == "acd":
# Check acd file
aal_file_name = base_dir+"/"+file.replace(".acd", ".aal")
acd_file_name = base_dir+"/"+file
if os.path.isfile(aal_file_name): # The corresponding aal file exists
acd_file = ""
aal_file = ""
# Read acd and aal files
with open(acd_file_name, "r") as f:
acd_file = json.loads(f.read())
with open(aal_file_name, "r") as f:
aal_file = f.read()
mm = aalc(aal_file_name, libs_path="libs/aal/", root_path="", no_exec=True, web=False)["mm"]
if mm is not None:
inputfile = FileStream(aal_file_name)
# Update aal file
for x in acd_file:
if x["type"] == "PolicyUI":
clause_name = re.search(r'CLAUSE \w+', x["policy"]).group(0)
if clause_name not in aal_file: # Add the clause in the aal file
aal_file += "\n" + x["policy"]
else: # Update the clause
cl = mm.clause(clause_name.replace("CLAUSE ", ""))
if cl is not None:
rng = cl.source_range
original_clause = inputfile.getText(rng[0], rng[1])
if x["policy"] != original_clause:
aal_file = aal_file.replace(original_clause, x["policy"])
# TODO remove deleted clause
# Save aal file
with open(aal_file_name, "w") as f:
f.write(aal_file)
elif ext == "aal":
# Check aal file
aal_file_name = base_dir+"/"+file
acd_file_name = base_dir+"/"+file.replace(".aal", ".acd")
if os.path.isfile(acd_file_name): # The corresponding acd file exists
acd_file = ""
aal_file = ""
# Read acd and aal files
with open(acd_file_name, "r") as f:
acd_file = json.loads(f.read())
with open(aal_file_name, "r") as f:
aal_file = f.read()
mm = aalc(aal_file_name, libs_path="libs/aal/", root_path="", no_exec=True, web=False)["mm"]
if mm is not None:
inputfile = FileStream(aal_file_name)
# Update acd file
for x in mm.aalprog.clauses:
c = get_clause_node_from_json(acd_file, str(x.name))
if c is not None:
rng = x.source_range
original_clause = inputfile.getText(rng[0], rng[1])
if c["policy"] != original_clause:
c["policy"] = original_clause
# Save acd file
with open(acd_file_name, "w") as f:
json.dump(acd_file, f)
# SVN
def svn_init():
if not os.path.isdir(".workspace"):
print(" - Creating svn repo at .workspace")
p = Popen(['svnadmin', "create", ".workspace"])
p.wait()
svn_path = "file://%s" % os.path.realpath(__file__).replace("ui/api.py", ".workspace")
p = Popen(['svn', "import", base_dir + "/", svn_path, "-m", "'Initial commit'"])
p.wait()
p = Popen(['svn', "checkout", "--force", svn_path, base_dir + "/"])
p.wait()
# Svn log
def svn_log(target):
p = Popen(['svn', "up", base_dir + "/"])
p.wait()
p = Popen(['svn', "log", base_dir + "/" + target, "--xml"], stdout=PIPE, stderr=PIPE, stdin=PIPE)
p.wait()
log = p.stdout.read().decode("utf-8")
return log
# Svn revert
def svn_revert(target, version):
p = Popen(['svn', "merge", "-r", "HEAD:%s" % version, "%s" % target], cwd=base_dir+'/')
p.wait()
p = Popen(['svn', "commit", "-m", "Rolled back to r%s" % version, "%s/%s" % (base_dir, target)], stdout=PIPE, stderr=PIPE, stdin=PIPE)
p.wait()
log = p.stdout.read().decode("utf-8")
return log
# Svn diff
def svn_diff(target, r1, r2):
print("%s %s" % (r1, r2))
try:
r1 = int(r1)
r2 = int(r2)
except:
r1 = 1
r2 = 1
r1 = r1 if r1 >= 1 else 1
r2 = r2 if r2 >= 1 else 1
# p = Popen(['svn', "up", base_dir + "/"])
# p.wait()
p = Popen(['svn', "diff", base_dir + "/" + target, "-r", "%s:%s" %(r1, r2)], stdout=PIPE, stderr=PIPE, stdin=PIPE)
p.wait()
log = p.stdout.read().decode("utf-8").replace("\n", "<br>")
return log
# Fodtlmon web service
def start_fodtlmon_server(server_port=9999):
import os
from subprocess import Popen
    Popen(['python3', 'ui/mon.py', str(server_port)])
sleep(1)
return server_port
# Translate AAL clause to FODTL
def aal_to_fodtl(file, clause):
res = "Error"
mm = aalc(base_dir + "/" + file, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
if mm is not None:
c = mm.clause(clause)
if c is not None:
res = aal_clause_to_fodtl(c)
return res
# Get AAL behaviors
def get_aal_behaviors(file):
from simul.SimulGenerator import m_behavior_to_behavior
import json
res = []
mm = aalc(base_dir + "/" + file, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
if mm is not None:
behaviors = mm.aalprog.get_behaviors()
for b in behaviors:
res.append(json.dumps(m_behavior_to_behavior(b, b.name).to_json()))
return json.dumps(res)
| gpl-3.0 | 3,545,888,401,039,262,700 | 31.777618 | 138 | 0.521404 | false |
remiscarlet/RandomKCWikiScripts | kcwiki-web/kcwiki/Python Scripts/mw-scripts/mediawikinuker.py | 1 | 1857 | # -*- coding: UTF-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import requests
import re
import os
from lxml import etree
fileLoc = os.path.join("/","Users","YutoTakamoto","Dropbox","YutoProgramming","python","pagesToNuke.txt")
baseURL = "http://en.kancollewiki.net/api.php?"
searchSize = 50
searchParams = ["@comment"]
searchProps = ["timestamp","wordcount","size"]
nameSpaces = ["1","201","203"]
wordSizeLimit = 1000
totalHits = 0
reason = "Deleting clutter."
params = {
"action":"query",
"list":"search",
"srsearch":"+".join(searchParams),
"srprop":"|".join(searchProps),
"srnamespace":"|".join(nameSpaces),
"srlimit":str(searchSize),
"sroffset":"0",
"format":"xml"
}
class HitsParser(object):
def start(self, tag, attrib):
if (tag == "searchinfo"):
self.totalHits = attrib["totalhits"]
def close(self):
pass
class Parser(object):
def start(self, tag, attrib):
if (tag == "p"):
if attrib["size"].isdigit() and attrib["wordcount"].isdigit():
if int(attrib["wordcount"])<wordSizeLimit:
# pass
self.file.write(attrib["title"]+"|"+self.reason+"\n")
#print attrib
def close(self):
pass
url = baseURL
for param,val in params.items():
url+="&"+param+"="+val
request = requests.get(url)
f = open(fileLoc,"w")
hitParser = etree.XMLParser(target = HitsParser())
result = etree.XML(request.text,hitParser)
totalHits = int(hitParser.target.totalHits)
print totalHits
parser = etree.XMLParser(target = Parser())
parser.target.file = f
parser.target.reason = reason
etree.XML(request.text,parser)
totalHits = totalHits if totalHits<5000 else 5000
for offset in xrange(0,totalHits,searchSize):
#break
params["sroffset"] = str(offset)
url = baseURL
for param,val in params.items():
url+="&"+param+"="+val
print url
req = requests.get(url)
etree.XML(req.text,parser)
parser.target.file.close()
| gpl-2.0 | -545,714,957,443,156,200 | 23.434211 | 105 | 0.690361 | false |