repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
StuartAxelOwen/join | join/_join_funcs.py | 1 | 2328 | from functools import partial
__author__ = 'stuart'
def get_object_attrs(obj):
if hasattr(obj, '__dict__'):
return obj.__dict__
elif hasattr(obj, '__slots__'):
return {key: getattr(obj, key) for key in obj.__slots__}
else:
return {}
class Union(object):
def __init__(self, attributes):
if isinstance(attributes, dict):
for name, value in attributes.items():
setattr(self, name, value)
else:
for name, value in attributes:
setattr(self, name, value)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.__dict__)
def tuple_join(left, right):
"""
Returns a tuple of the joined objects
>>> tuple_join(1, '2')
(1, '2')
:param left: left object to be joined with right
:param right: right object to be joined with left
:return: tuple containing both join parents
"""
return left, right
def union_join(left, right, left_as='left', right_as='right'):
"""
Join function truest to the SQL style join. Merges both objects together in a sum-type,
saving references to each parent in ``left`` and ``right`` attributes.
>>> Dog = namedtuple('Dog', ['name', 'woof', 'weight'])
>>> dog = Dog('gatsby', 'Ruff!', 15)
>>> Cat = namedtuple('Cat', ['name', 'meow', 'weight'])
>>> cat = Cat('pleo', 'roooowwwr', 12)
>>> catdog = union_join(cat, dog, 'cat', 'dog')
>>> catdog.name
pleo
>>> catdog.woof
Ruff!
>>> catdog.dog.name
gatsby
:param left: left object to be joined with right
:param right: right object to be joined with left
:return: joined object with attrs/methods from both parents available
"""
attrs = {}
attrs.update(get_object_attrs(right))
attrs.update(get_object_attrs(left))
attrs[left_as] = left
attrs[right_as] = right
if isinstance(left, dict) and isinstance(right, dict):
return attrs
else:
joined_class = type(left.__class__.__name__ + right.__class__.__name__, (Union,),
{})
return joined_class(attrs)
def make_union_join(left_as='left', right_as='right'):
return partial(union_join, left_as=left_as, right_as=right_as)
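# A small usage sketch (added for illustration; the Cat and Dog classes below
# are stand-ins mirroring the union_join docstring and are not part of the
# original module).
if __name__ == '__main__':
    class Cat(object):
        def __init__(self):
            self.name, self.meow = 'pleo', 'roooowwwr'

    class Dog(object):
        def __init__(self):
            self.name, self.woof = 'gatsby', 'Ruff!'

    cat_dog_join = make_union_join(left_as='cat', right_as='dog')
    catdog = cat_dog_join(Cat(), Dog())
    print(catdog.name)      # pleo   -- on clashes the left object's attribute wins
    print(catdog.woof)      # Ruff!  -- attributes merged in from the right object
    print(catdog.dog.name)  # gatsby -- each parent stays reachable under its alias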
| mit | -4,050,428,239,922,643,500 | 29.233766 | 92 | 0.572165 | false |
soylentdeen/Graffity | src/ErrorBudgetAnalysis.py | 1 | 7600 | import Graffity
import numpy
import scipy
import matplotlib.pyplot as pyplot
wave = 632.8
ciao = Graffity.WFS(wavelength=1800.0)
var = numpy.array([False, False, True, True, True])
offsets = []
x = 0
for v in var:
if v:
offsets.append(x)
x+= 1
else:
offsets.append(0)
zern = [0.0, 0.0, 0.0, 0.0, 0.0]
pupil = [0.0, 0.0]
actPoke = numpy.zeros(60, dtype=numpy.float32)
derotAngle = 0.00
clockingAngle = 0.0
# Take the flat-wavefront image
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[0]:
f0 = pyplot.figure(0)
f0.clear()
ax0 = f0.add_axes([0.1, 0.1, 0.8, 0.8])
if var[1]:
f1 = pyplot.figure(1)
f1.clear()
ax1 = f1.add_axes([0.1, 0.1, 0.8, 0.8])
if var[2]:
f2 = pyplot.figure(2)
f2.clear()
ax2 = f2.add_axes([0.1, 0.1, 0.8, 0.8])
if var[3]:
f3 = pyplot.figure(3)
f3.clear()
ax3 = f3.add_axes([0.1, 0.1, 0.8, 0.8])
if var[4]:
f4 = pyplot.figure(4)
f4.clear()
ax4 = f4.add_axes([0.1, 0.1, 0.8, 0.8])
f5 = pyplot.figure(5)
f5.clear()
f6 = pyplot.figure(6)
f6.clear()
ax5 = f5.add_axes([0.1, 0.1, 0.8, 0.8])
ax6 = f6.add_axes([0.1, 0.1, 0.8, 0.8])
wferror = numpy.linspace(-2.0*wave, 2.0*wave, num=14)
clockingAngle = 0.00
for rms in wferror:
print rms
if var[0]:
zern = [rms, 0.0, 0.0, 0.0, 0.0]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[1]:
zern = [0.0, rms, 0.0, 0.0, 0.0]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[2]:
zern = [0.0, 0.0, rms, 0.0, 0.0]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[3]:
zern = [0.0, 0.0, 0.0, rms, 0.0]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
if var[4]:
zern = [0.0, 0.0, 0.0, 0.0, rms]
ciao.setupInstrument(zern, pupil, actPoke, derotAngle, clockingAngle)
ciao.expose()
centroids = numpy.array(ciao.centroids)
nvars = len(var[var==True])
flat = centroids[0]
if var[0]:
tip = centroids[[i*nvars+offsets[0]+1 for i in range(len(wferror))]]-flat
if var[1]:
tilt = centroids[[i*nvars+offsets[1]+1 for i in range(len(wferror))]]-flat
if var[2]:
focus = centroids[[i*nvars+offsets[2]+1 for i in range(len(wferror))]]-flat
if var[3]:
astig1 = centroids[[i*nvars+offsets[3]+1 for i in range(len(wferror))]]-flat
if var[4]:
astig2 = centroids[[i*nvars+offsets[4]+1 for i in range(len(wferror))]]-flat
colorMap = pyplot.get_cmap()
colors = [colorMap(i) for i in numpy.linspace(0, 1, len(wferror))]
subapnum = range(68)
rms_x = []
rms_y = []
max_x = []
max_y = []
for i in range(len(wferror)):
rx = []
ry = []
mx = []
my = []
if var[0]:
ax0.plot(subapnum, tip[:,:,0][i], color=colors[i], marker='o')
ax0.plot(subapnum, tip[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(tip[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(tip[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(tip[:,:,0][i])))
my.append(numpy.max(numpy.abs(tip[:,:,1][i])))
if var[1]:
ax1.plot(subapnum, tilt[:,:,0][i], color=colors[i], marker='o')
ax1.plot(subapnum, tilt[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(tilt[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(tilt[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(tilt[:,:,0][i])))
my.append(numpy.max(numpy.abs(tilt[:,:,1][i])))
if var[2]:
ax2.plot(subapnum, focus[:,:,0][i], color=colors[i], marker='o')
ax2.plot(subapnum, focus[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(focus[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(focus[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(focus[:,:,0][i])))
my.append(numpy.max(numpy.abs(focus[:,:,1][i])))
if var[3]:
ax3.plot(subapnum, astig1[:,:,0][i], color=colors[i], marker='o')
ax3.plot(subapnum, astig1[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(astig1[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(astig1[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(astig1[:,:,0][i])))
my.append(numpy.max(numpy.abs(astig1[:,:,1][i])))
if var[4]:
ax4.plot(subapnum, astig2[:,:,0][i], color=colors[i], marker='o')
ax4.plot(subapnum, astig2[:,:,1][i], color=colors[i], marker='+')
rx.append(numpy.sqrt(numpy.average(astig2[:,:,0][i]**2.0)))
ry.append(numpy.sqrt(numpy.average(astig2[:,:,1][i]**2.0)))
mx.append(numpy.max(numpy.abs(astig2[:,:,0][i])))
my.append(numpy.max(numpy.abs(astig2[:,:,1][i])))
rms_x.append(rx)
rms_y.append(ry)
max_x.append(mx)
max_y.append(my)
rms_x = numpy.array(rms_x).transpose()
rms_y = numpy.array(rms_y).transpose()
max_x = numpy.array(max_x).transpose()
max_y = numpy.array(max_y).transpose()
labels = []
lines = []
if var[0]:
lines.append(ax5.plot(wferror, max_x[offsets[0]], color = 'b', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[0]], color = 'b', marker = 'o')
labels.append["Tip"]
#ax5.plot(wferror, rms_x[0], color = 'b', marker = '+')
if var[1]:
lines.append(ax5.plot(wferror, max_x[offsets[1]], color = 'g', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[1]], color = 'g', marker = 'o')
labels.append["Tilt"]
#ax5.plot(wferror, rms_x[1], color = 'g', marker = '+')
if var[2]:
lines.append(ax5.plot(wferror, max_x[offsets[2]], color = 'r', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[2]], color = 'r', marker = 'o')
labels.append("Focus")
#ax5.plot(wferror, rms_x[2], color = 'r', marker = '+')
if var[3]:
lines.append(ax5.plot(wferror, max_x[offsets[3]], color = 'c', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[3]], color = 'c', marker = 'o')
labels.append("Astig1")
#ax5.plot(wferror, rms_x[3], color = 'c', marker = '+')
if var[4]:
lines.append(ax5.plot(wferror, max_x[offsets[4]], color = 'm', marker = 'o')[0])
ax6.plot(wferror, max_y[offsets[4]], color = 'm', marker = 'o')
labels.append("Astig2")
#ax5.plot(wferror, rms_x[4], color = 'm', marker = '+')
ax5.set_xlabel("RMS Wavefront Error (nm)")
ax5.set_ylabel("Maximum X Slope (pixels)")
f5.legend(lines, labels)
ax5.set_title('X Slopes')
ax6.set_xlabel("RMS Wavefront Error (nm)")
ax6.set_ylabel("Maximum Y Slope (pixels)")
f6.legend(lines, labels)
ax6.set_title('Y Slopes')
if var[0]:
ax0.set_xlabel("Subaperture Number")
ax0.set_ylabel("Slopes (Pixels)")
ax0.set_title('Tip')
f0.show()
f0.savefig('tip.png')
if var[1]:
ax1.set_xlabel("Subaperture Number")
ax1.set_ylabel("Slopes (Pixels)")
ax1.set_title('Tilt')
f1.show()
f1.savefig('tilt.png')
if var[2]:
ax2.set_xlabel("Subaperture Number")
ax2.set_ylabel("Slopes (Pixels)")
ax2.set_title('Focus')
f2.show()
f2.savefig('focus.png')
if var[3]:
ax3.set_xlabel("Subaperture Number")
ax3.set_ylabel("Slopes (Pixels)")
ax3.set_title('Oblique Astigmatism')
f3.show()
f3.savefig('ObliqAstig.png')
if var[4]:
ax4.set_xlabel("Subaperture Number")
ax4.set_ylabel("Slopes (Pixels)")
ax4.set_title('Vertical Astigmatism')
f4.show()
f4.savefig('VertAstig.png')
f5.show()
f5.savefig('Xerror.png')
f6.show()
f6.savefig('Yerror.png')
| mit | -3,690,607,500,366,776,000 | 31.478632 | 84 | 0.589474 | false |
jbvsmo/discoder | discoder/lib/parse.py | 1 | 2685 | # coding: utf-8
""" Copyright (c) 2013 João Bernardo Vianna Oliveira
This file is part of Discoder.
Discoder is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Discoder is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Discoder. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'jb'
from discoder.lib import Obj
import re
class ParseError(Exception):
pass
tag = Obj(
# Regex for parsing the markup language generated by ffprobe or avprobe
open = re.compile(r'^\[(\w+)\]$'),
close = re.compile(r'^\[/(\w+)\]$'),
value = re.compile(r'^(\w+)(?::(\w+))?=(.*)$')
)
def probe(text):
"""
Parse multiline text generated by `ffprobe` or `avprobe`
Command line:
ffprobe -v quiet [-show_format] [-show_streams] filename
:type text: str
Input:
------
[TAG]
data_x=1
data_y=2
INFO:data_z=3
[/TAG]
Output:
-------
{'tag': [{'data_x': '1', 'data_y': '2', 'info': {'data_z': '3'}}]}
"""
blocks = Obj()
this = None
for i, line in enumerate(text.splitlines()):
if not line.strip():
continue
open_block = tag.open.match(line)
if open_block:
if this is not None:
raise ParseError('Opened block without closing last one: {0}: {1}'.format(i, line))
this = Obj()
name = open_block.group(1).lower()
if name == 'stream':
name += 's' # compatibility with json output
if name != 'format': # "format" only has one element.
blocks.setdefault(name, []).append(this)
else:
blocks[name] = this
else:
if this is None:
raise ParseError("There's no block to insert data or close: {0}: {1}".format(i, line))
if tag.close.match(line):
this = None
else:
name, sub, val = tag.value.match(line).groups()
if not sub:
this[name] = val
else:
attr = this.setdefault(name.lower(), Obj())
attr[sub] = val
return blocks
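# Illustrative usage sketch (not part of the original module). The sample text
# follows the markup documented in probe()'s docstring; note that every parsed
# value comes back as a string.
if __name__ == '__main__':
    sample = "\n".join([
        "[FORMAT]",
        "duration=12.5",
        "[/FORMAT]",
        "[STREAM]",
        "codec_type=video",
        "TAG:language=eng",
        "[/STREAM]",
    ])
    info = probe(sample)
    print(info['format']['duration'])             # -> '12.5' (kept as a string)
    print(info['streams'][0]['codec_type'])       # -> 'video'
    print(info['streams'][0]['tag']['language'])  # -> 'eng'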
| gpl-3.0 | 4,299,707,843,823,941,600 | 30.952381 | 102 | 0.548808 | false |
Fokko/incubator-airflow | tests/providers/google/cloud/operators/test_vision_system.py | 1 | 2023 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tests.gcp.utils.gcp_authenticator import GCP_AI_KEY
from tests.providers.google.cloud.operators.test_vision_system_helper import GCPVisionTestHelper
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, provide_gcp_context, skip_gcp_system
from tests.test_utils.system_tests_class import SystemTest
VISION_HELPER = GCPVisionTestHelper()
@skip_gcp_system(GCP_AI_KEY)
class CloudVisionExampleDagsSystemTest(SystemTest):
@provide_gcp_context(GCP_AI_KEY)
def setUp(self):
super().setUp()
VISION_HELPER.create_bucket()
@provide_gcp_context(GCP_AI_KEY)
def tearDown(self):
VISION_HELPER.delete_bucket()
super().tearDown()
@provide_gcp_context(GCP_AI_KEY)
def test_run_example_gcp_vision_autogenerated_id_dag(self):
self.run_dag('example_gcp_vision_autogenerated_id', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_AI_KEY)
def test_run_example_gcp_vision_explicit_id_dag(self):
self.run_dag('example_gcp_vision_explicit_id', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_AI_KEY)
def test_run_example_gcp_vision_annotate_image_dag(self):
self.run_dag('example_gcp_vision_annotate_image', CLOUD_DAG_FOLDER)
| apache-2.0 | 7,918,690,223,469,108,000 | 39.46 | 102 | 0.741473 | false |
kubow/HAC | System/UI74KW.py | 1 | 2115 | #!/usr/bin/python3
import os.path
from kivy.resources import resource_add_path
KV_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__)))
resource_add_path(KV_PATH)
print(KV_PATH)
#import kivy
#kivy.require('1.7.1')
from kivy.lang import Builder
Builder.load_file('H808E.kv')
from kivy.app import App
from kivy.properties import ObjectProperty
#from kivy.uix.scatter import Scatter
from kivy.uix.listview import ListItemButton
#from kivy.adapters.listadapter import ListAdapter
from kivy.uix.boxlayout import BoxLayout
#from kivy.uix.floatlayout import FloatLayout  # good for 3D
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ListProperty, StringProperty
from OS74 import FileSystemObject
class ShowEnc(GridLayout):
main_text = ObjectProperty(None)
folder_list = ListProperty([])
folder_select = StringProperty('Select a folder')
file_list = ListProperty([])
file_select = StringProperty('Select a file')
fldr_lib, file_lib = FileSystemObject().object_read_split()
actual_location = FileSystemObject().path
def multimedia_content(self):
print(self.actual_location)
directory = FileSystemObject(self.actual_location).dir_up(1)
self.fldr_lib, self.file_lib = FileSystemObject(directory).object_read_split()
print(directory)
# clear the lists content
del self.file_list[:]
del self.folder_list[:]
# append new data
self.file_list.append(self.file_lib)
self.folder_list.append(self.fldr_lib)
def folder_on_select(self, change_value):
self.selected_value = "Selected: {0}".format(change_value.text)
print(self.selected_value)
def file_on_select(self, change_value):
self.selected_value = "Selected: {0}".format(change_value.text)
print(self.selected_value)
def clear(self):
self.main_text.text = ""
self.main_text.focus = True
class MainApp(App):
title = 'H808E'
def build(self):
return ShowEnc()
if __name__ == '__main__':
MainApp().run()
| unlicense | -7,743,993,530,130,653,000 | 29.652174 | 86 | 0.689362 | false |
jmrozanec/white-bkg-classification | scripts/dl-histograms/04-architecture-02b.py | 1 | 2150 | #TFLearn bug regarding image loading: https://github.com/tflearn/tflearn/issues/180
#Monochromes img-magick: https://poizan.dk/blog/2014/02/28/monochrome-images-in-imagemagick/
#How to persist a model: https://github.com/tflearn/tflearn/blob/master/examples/basics/weights_persistence.py
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
train_file = '../../images/sampling/dataset-splits/train-cv-5.txt'
test_file = '../../images/sampling/dataset-splits/test-cv-5.txt'
channels=1
width=64
height=50
X, Y = image_preloader(train_file, image_shape=(height, width), mode='file', categorical_labels=True, normalize=True)
testX, testY = image_preloader(test_file, image_shape=(height, width), mode='file', categorical_labels=True, normalize=True)
network = input_data(shape=[None, width, height], name='input')
network = tflearn.layers.core.reshape(network, [-1, width, height, 1], name='Reshape')
network = batch_normalization(network)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = batch_normalization(network)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = batch_normalization(network)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = fully_connected(network, 2, activation='softmax')
# Build neural network and train
network = regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=5, validation_set=(testX, testY), snapshot_step=10, snapshot_epoch=False, show_metric=True, run_id='white-bkg-2')
#epoch=4 => 98%
| apache-2.0 | 5,411,774,150,972,732,000 | 50.190476 | 137 | 0.76186 | false |
PatentBlocker/Motorola_Patent_Citations | src/get_citations.py | 1 | 1772 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 16:08:56 2016
@author: Thibault
"""
import pandas as pd
import numpy as np
# Loading the data
data_dir = '../data'
# training and validation sets
train_file = data_dir + '/blocking1114.csv'
# Opening the blocking data
TrainFile = pd.read_csv(train_file, header=None)
TrainFile.columns = ['Application', 'Patent_Blocking']
# Opening the Portfolio database
portf = data_dir + '/SamplePortfolioforBerkeley.csv'
Moto_database = pd.read_csv(portf, sep=',')
# Creating the query
Moto_Patents = np.asarray(Moto_database['Patent #'])
# Wraps a value in single quotes so it can be used as a SQL string literal
def foo(s1):
return "'{}'".format(s1)
def query(table):
query = 'SELECT uspatentcitation.citation_id, uspatentcitation.patent_id FROM uspatentcitation WHERE uspatentcitation.citation_id='
for k in table:
if k != table[-1]:
query += foo(str(k)) + ' OR uspatentcitation.citation_id='
else:
query += foo(str(k))
return query
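# For example (with illustrative patent numbers), query([1234567, 7654321])
# builds the following statement (shown here wrapped over several lines):
#   SELECT uspatentcitation.citation_id, uspatentcitation.patent_id
#   FROM uspatentcitation
#   WHERE uspatentcitation.citation_id='1234567'
#   OR uspatentcitation.citation_id='7654321'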
print(query(Moto_Patents))
# Connecting to the server
"NEED TO CONNECT TO SQL DATABASE USING MySQL"
# Doing the query to get the database
"""
SELECT uspatentcitation.citation_id, uspatentcitation.patent_id
FROM uspatentcitation
WHERE uspatentcitation.citation_id='7046910'
OR uspatentcitation.citation_id='5903133'
OR uspatentcitation.citation_id='8395587'
OR uspatentcitation.citation_id='6408436'
OR uspatentcitation.citation_id='7190956'
OR uspatentcitation.citation_id='6778512'
OR uspatentcitation.citation_id='5794185'
OR uspatentcitation.citation_id='6592696'
OR uspatentcitation.citation_id='8078203'
OR uspatentcitation.citation_id='8229428'
OR uspatentcitation.citation_id='7555696'
OR uspatentcitation.citation_id='5946653'
OR uspatentcitation.citation_id='7675970'
""""
| bsd-2-clause | -3,259,559,936,448,086,000 | 22.315789 | 135 | 0.731941 | false |
public/python-super3 | test.py | 1 | 2905 | import time
import inspect
import random
from super3 import more_super3 as super3, callable_super3
def super_proxy(self, type):
py_super = super(type, self)
my_super = super3(caller=inspect.currentframe().f_back)
assert py_super.__str__() == my_super.__str__() \
or (py_super.__self_class__ == my_super.__self_class__ and \
py_super.__self__ == my_super.__self__ and \
py_super.__thisclass__ == my_super.__thisclass__)
return my_super
def with_super_result(func):
def decorated(self, *args, **kwargs):
s = super3()
r = getattr(s, func.__name__)()
return func(self, r)
return decorated
class A(object):
def f(self):
return A
class A2(object):
def f(self):
return A2
class A3(A):
@with_super_result
def f(self, super):
return A3
class B(A):
pass
class C(B):
def f(self):
return super_proxy(self, C).f()
class D(C, A2):
def f(self):
return C.f(self)
class E(C, A, A2):
pass
class F(E):
def f(self):
return super_proxy(self, F).f()
class G(F, E, A):
def f(self):
r = super_proxy(self, G).f()
assert r == A
return F.f(self)
class H(G):
@with_super_result
def f(self, super):
return H
class I(H):
@with_super_result
def f(self, super):
assert super == H
return I
class J(A):
def f(self):
r = callable_super3()()
return r
class K(G):
def f(self):
return callable_super3()()
classes = [J, K, A3, I, H, A, A2, B, C, D, E, F, G]
random.shuffle(classes)
print(classes)
for cls in classes:
print((cls, cls().f()))
def speed():
class A(object):
def f(self):
return A, self
class myImplicitB(A):
def f(self):
return super3().f()
class myExplicitB(A):
def f(self):
return super3(myExplicitB, self).f()
class pyB(A):
def f(self):
return super(pyB, self).f()
class myDecoratedB(A):
@with_super_result
def f(self, result):
return self
def super_time(cls):
b = cls()
N = 10000
U = 10
s = time.time()
for i in range(1, N):
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
b.f()
e = time.time()
print((e-s), (e-s)/(N*U))
return (e-s), N*U
py = super_time(pyB)
myI = super_time(myImplicitB)
myE = super_time(myExplicitB)
myD = super_time(myDecoratedB)
print("implicit is", myI[0]/py[0], "times slower than normal super()")
print("explicit is", myE[0]/py[0], "times slower than normal super()")
print("decorated is", myD[0]/py[0], "times slower than normal super()")
speed()
| lgpl-3.0 | 1,098,780,801,679,328,900 | 19.034483 | 75 | 0.512909 | false |
GarethPW/Scratch-Comment-Viewer | old/v2.0.0/scratchcomments.py | 1 | 4830 | '''
Scratch Project Comments Parser v1.0.0
Created for use with SCV Server v2.0.0
Created by Scratch user, Gaza101.
Licensed under GNU General Public License v3.
www.garethpw.net
'''
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
from urllib2 import urlopen
class CommentsParser(HTMLParser):
def __init__(self, emap={}):
HTMLParser.__init__(self)  # initialise the base parser state
self.emap = emap
self.out = []
self.nest = []
self.comments = str()
def aDict(self,a): #Converts attrs into dict format for easier parsing.
d = {} # e.g. [('class', 'example'),('height', '50px')]
for i in a: # becomes {'class':('example',),'height':('50px',)}
if i[0] in d:
d[i[0]] += (i[1],)
else:
d[i[0]] = (i[1],)
return d
def isLegal(self,n,r): #Checks the nest based on a set of rules provided.
try: # Rule format: [(#tuple of tag nest - can be any length - starts from root tag),(#level of nest, #attr, #value)]
if ( tuple([i[0] for i in n][:len(r[0])]) == r[0]
and not (False in [(True in [sr[2] in i for i in n[sr[0]][1][sr[1]]]) for sr in r[1:]]) ):
return True
except KeyError:
pass
return False
def isCName(self,n): #Checks if the current nest is valid to be the comment username.
return self.isLegal(n,[ ("li","div","div","div",'a'),
(0,"class","top-level-reply"),
(1,"class","comment"),
(2,"class","info"),
(3,"class","name") ])
def isCBody(self,n): #Checks if the current nest is valid to be the comment body.
return self.isLegal(n,[ ("li","div","div","div"),
(0,"class","top-level-reply"),
(1,"class","comment"),
(2,"class","info"),
(3,"class","content") ])
def handle_starttag(self, tag, attrs):
il = (self.isCName(self.nest),self.isCBody(self.nest))
self.nest.append((tag,self.aDict(attrs)))
if il != (self.isCName(self.nest),self.isCBody(self.nest)): #Check if a new comment username or body has begun.
self.out.append([]) #If so, append new list to output array.
if tag == "img": #If the tag is valid to be an emoticon,
if ( self.isCBody(self.nest)
and self.isLegal(self.nest,[ tuple(),
(-1,"class","easter-egg") ]) ):
try:
self.out[-1].append(self.emap[self.nest[-1][1]['src'][0]]) #Attempt to match with its alias in the emoticon map.
except KeyError:
self.out[-1].append("_undefined_") #If alias not found, refer to it as "_undefined_"
self.nest.pop() #Remove image from nest array since it's most likely without an end tag.
def handle_endtag(self,tag):
if tag != "img": #Ignore img end tags since they will have already been dealt with.
self.nest.pop()
def handle_data(self,data):
if self.isCName(self.nest) or self.isCBody(self.nest): #If we're in valid comment text,
self.out[-1].append(data) #Append text to output.
def handle_entityref(self,name):
if self.isCName(self.nest) or self.isCBody(self.nest): #If we're in valid comment text,
self.out[-1].append(unichr(name2codepoint[name])) #Append text to output.
def handle_charref(self,name):
if self.isCName(self.nest) or self.isCBody(self.nest): #If we're in valid comment text,
self.out[-1].append(unichr(int(name[1:],16) if name[0] == 'x' else int(name))) #Append text to output.
def parse(self,project_id,max_comments=30): #Fetch and parse the comments of the given project.
comments = urlopen("https://scratch.mit.edu/site-api/comments/project/"+str(project_id)+'/').read()
if self.comments != comments: #If we haven't already parsed this,
self.comments = comments
self.out = [] #Reinitialise the instance.
self.nest = []
self.reset() #Reset the parser.
self.feed(self.comments) #Feed the parser the data from the comments of the project specified.
self.out = tuple( [{"user": u''.join([u''.join([unichr(ord(c)) for c in m]) for m in self.out[i]]), #Convert parsed data into a more usable format. e.g. {'user','Gaza101','msg':'_meow_'}
"msg": u''.join([u''.join([unichr(ord(c)) for c in m]) for m in self.out[i+1]])[23:-12]} for i in range(0,min(len(self.out),max_comments),2)] )
return self.out #Output parsed data.
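# Minimal usage sketch (illustrative only -- it performs a live request to
# scratch.mit.edu, and both the emoticon-map entry and the project id below
# are made-up examples):
if __name__ == '__main__':
    parser = CommentsParser(emap={'/images/emoji/meow.png': '_meow_'})
    for comment in parser.parse(project_id=10128407, max_comments=10):
        print comment['user'] + ': ' + comment['msg']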
| gpl-3.0 | -5,200,480,149,626,272,000 | 56.5 | 199 | 0.547826 | false |
kyubifire/softlayer-python | SoftLayer/managers/dns.py | 1 | 8207 | """
SoftLayer.dns
~~~~~~~~~~~~~
DNS Manager/helpers
:license: MIT, see LICENSE for more details.
"""
import time
from SoftLayer import utils
class DNSManager(utils.IdentifierMixin, object):
"""Manage SoftLayer DNS.
See product information here: http://www.softlayer.com/DOMAIN-SERVICES
:param SoftLayer.API.BaseClient client: the client instance
"""
def __init__(self, client):
self.client = client
self.service = self.client['Dns_Domain']
self.record = self.client['Dns_Domain_ResourceRecord']
self.resolvers = [self._get_zone_id_from_name]
def _get_zone_id_from_name(self, name):
"""Return zone ID based on a zone."""
results = self.client['Account'].getDomains(
filter={"domains": {"name": utils.query_filter(name)}})
return [x['id'] for x in results]
def list_zones(self, **kwargs):
"""Retrieve a list of all DNS zones.
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: A list of dictionaries representing the matching zones.
"""
return self.client['Account'].getDomains(**kwargs)
def get_zone(self, zone_id, records=True):
"""Get a zone and its records.
:param zone: the zone name
:returns: A dictionary containing a large amount of information about
the specified zone.
"""
mask = None
if records:
mask = 'resourceRecords'
return self.service.getObject(id=zone_id, mask=mask)
def create_zone(self, zone, serial=None):
"""Create a zone for the specified zone.
:param zone: the zone name to create
:param serial: serial value on the zone (default: strftime(%Y%m%d01))
"""
return self.service.createObject({
'name': zone,
'serial': serial or time.strftime('%Y%m%d01'),
"resourceRecords": {}})
def delete_zone(self, zone_id):
"""Delete a zone by its ID.
:param integer zone_id: the zone ID to delete
"""
return self.service.deleteObject(id=zone_id)
def edit_zone(self, zone):
"""Update an existing zone with the options provided.
The provided dict must include an 'id' key and value corresponding
to the zone that should be updated.
:param dict zone: the zone to update
"""
self.service.editObject(zone)
def create_record(self, zone_id, record, record_type, data, ttl=60):
"""Create a resource record on a domain.
:param integer id: the zone's ID
:param record: the name of the record to add
:param record_type: the type of record (A, AAAA, CNAME, TXT, etc.)
:param data: the record's value
:param integer ttl: the TTL or time-to-live value (default: 60)
"""
resource_record = self._generate_create_dict(record, record_type, data,
ttl, domainId=zone_id)
return self.record.createObject(resource_record)
def create_record_mx(self, zone_id, record, data, ttl=60, priority=10):
"""Create a mx resource record on a domain.
:param integer id: the zone's ID
:param record: the name of the record to add
:param data: the record's value
:param integer ttl: the TTL or time-to-live value (default: 60)
:param integer priority: the priority of the target host
"""
resource_record = self._generate_create_dict(record, 'MX', data, ttl,
domainId=zone_id, mxPriority=priority)
return self.record.createObject(resource_record)
def create_record_srv(self, zone_id, record, data, protocol, port, service,
ttl=60, priority=20, weight=10):
"""Create a resource record on a domain.
:param integer id: the zone's ID
:param record: the name of the record to add
:param data: the record's value
:param string protocol: the protocol of the service, usually either TCP or UDP.
:param integer port: the TCP or UDP port on which the service is to be found.
:param string service: the symbolic name of the desired service.
:param integer ttl: the TTL or time-to-live value (default: 60)
:param integer priority: the priority of the target host (default: 20)
:param integer weight: relative weight for records with same priority (default: 10)
"""
resource_record = self._generate_create_dict(record, 'SRV', data, ttl, domainId=zone_id,
priority=priority, protocol=protocol, port=port,
service=service, weight=weight)
# The createObject won't creates SRV records unless we send the following complexType.
resource_record['complexType'] = 'SoftLayer_Dns_Domain_ResourceRecord_SrvType'
return self.record.createObject(resource_record)
def create_record_ptr(self, record, data, ttl=60):
"""Create a reverse record.
:param record: the public ip address of device for which you would like to manage reverse DNS.
:param data: the record's value
:param integer ttl: the TTL or time-to-live value (default: 60)
"""
resource_record = self._generate_create_dict(record, 'PTR', data, ttl)
return self.record.createObject(resource_record)
@staticmethod
def _generate_create_dict(record, record_type, data, ttl, **kwargs):
"""Returns a dict appropriate to pass into Dns_Domain_ResourceRecord::createObject"""
# Basic dns record structure
resource_record = {
'host': record,
'data': data,
'ttl': ttl,
'type': record_type
}
for (key, value) in kwargs.items():
resource_record.setdefault(key, value)
return resource_record
def delete_record(self, record_id):
"""Delete a resource record by its ID.
:param integer id: the record's ID
"""
self.record.deleteObject(id=record_id)
def get_record(self, record_id):
"""Get a DNS record.
:param integer id: the record's ID
"""
return self.record.getObject(id=record_id)
def get_records(self, zone_id, ttl=None, data=None, host=None,
record_type=None):
"""List, and optionally filter, records within a zone.
:param zone: the zone name in which to search.
:param int ttl: time in seconds
:param str data: the records data
:param str host: record's host
:param str record_type: the type of record
:returns: A list of dictionaries representing the matching records
within the specified zone.
"""
_filter = utils.NestedDict()
if ttl:
_filter['resourceRecords']['ttl'] = utils.query_filter(ttl)
if host:
_filter['resourceRecords']['host'] = utils.query_filter(host)
if data:
_filter['resourceRecords']['data'] = utils.query_filter(data)
if record_type:
_filter['resourceRecords']['type'] = utils.query_filter(
record_type.lower())
results = self.service.getResourceRecords(
id=zone_id,
mask='id,expire,domainId,host,minimum,refresh,retry,'
'mxPriority,ttl,type,data,responsiblePerson',
filter=_filter.to_dict(),
)
return results
def edit_record(self, record):
"""Update an existing record with the options provided.
The provided dict must include an 'id' key and value corresponding to
the record that should be updated.
:param dict record: the record to update
"""
self.record.editObject(record, id=record['id'])
def dump_zone(self, zone_id):
"""Retrieve a zone dump in BIND format.
:param integer id: The zone ID to dump
"""
return self.service.getZoneFileContents(id=zone_id)
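# Usage sketch (illustrative; assumes SoftLayer credentials are configured in
# the environment, and the zone/record values below are made up):
#
#   import SoftLayer
#   client = SoftLayer.create_client_from_env()
#   dns = DNSManager(client)
#   zone = dns.create_zone('example.org')
#   dns.create_record(zone['id'], 'www', 'a', '203.0.113.10', ttl=900)
#   for record in dns.get_records(zone['id'], record_type='a'):
#       print(record['host'], record['data'])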
| mit | 1,036,015,339,823,414,800 | 33.628692 | 102 | 0.60156 | false |
adityahase/frappe | frappe/desk/page/user_profile/user_profile.py | 1 | 2323 | import frappe
from datetime import datetime
@frappe.whitelist()
def get_energy_points_heatmap_data(user, date):
return dict(frappe.db.sql("""select unix_timestamp(date(creation)), sum(points)
from `tabEnergy Point Log`
where
date(creation) > subdate('{date}', interval 1 year) and
date(creation) < subdate('{date}', interval -1 year) and
user = '{user}' and
type != 'Review'
group by date(creation)
order by creation asc""".format(user = user, date = date)))
@frappe.whitelist()
def get_energy_points_percentage_chart_data(user, field):
result = frappe.db.get_all('Energy Point Log',
filters = {'user': user, 'type': ['!=', 'Review']},
group_by = field,
order_by = field,
fields = [field, 'ABS(sum(points)) as points'],
as_list = True)
return {
"labels": [r[0] for r in result if r[0] != None],
"datasets": [{
"values": [r[1] for r in result]
}]
}
@frappe.whitelist()
def get_user_rank(user):
month_start = datetime.today().replace(day=1)
monthly_rank = frappe.db.get_all('Energy Point Log',
group_by = 'user',
filters = {'creation': ['>', month_start], 'type' : ['!=', 'Review']},
fields = ['user', 'sum(points)'],
order_by = 'sum(points) desc',
as_list = True)
all_time_rank = frappe.db.get_all('Energy Point Log',
group_by = 'user',
filters = {'type' : ['!=', 'Review']},
fields = ['user', 'sum(points)'],
order_by = 'sum(points) desc',
as_list = True)
return {
'monthly_rank': [i+1 for i, r in enumerate(monthly_rank) if r[0] == user],
'all_time_rank': [i+1 for i, r in enumerate(all_time_rank) if r[0] == user]
}
@frappe.whitelist()
def update_profile_info(profile_info):
profile_info = frappe.parse_json(profile_info)
keys = ['location', 'interest', 'user_image', 'bio']
for key in keys:
if key not in profile_info:
profile_info[key] = None
user = frappe.get_doc('User', frappe.session.user)
user.update(profile_info)
user.save()
return user
@frappe.whitelist()
def get_energy_points_list(start, limit, user):
return frappe.db.get_list('Energy Point Log',
filters = {'user': user, 'type': ['!=', 'Review']},
fields = ['name','user', 'points', 'reference_doctype', 'reference_name', 'reason',
'type', 'seen', 'rule', 'owner', 'creation', 'revert_of'],
start = start,
limit = limit,
order_by = 'creation desc')
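# Usage sketch (illustrative): being decorated with @frappe.whitelist(), these
# functions are normally reached from the user-profile page via frappe.call or
# the REST endpoint, e.g. (illustrative path and argument):
# /api/method/frappe.desk.page.user_profile.user_profile.get_user_rank?user=someone@example.com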
| mit | -6,314,365,701,903,240,000 | 28.782051 | 85 | 0.637538 | false |
ToonTownInfiniteRepo/ToontownInfinite | toontown/estate/DistributedFurnitureItem.py | 1 | 4767 | from toontown.toonbase.ToontownGlobals import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.catalog import CatalogItem
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from toontown.toonbase import TTLocalizer
import DistributedHouseItem
from direct.distributed import DistributedSmoothNode
from direct.task import Task
import HouseGlobals
class DistributedFurnitureItem(DistributedHouseItem.DistributedHouseItem, DistributedSmoothNode.DistributedSmoothNode):
notify = directNotify.newCategory('DistributedFurnitureItem')
deferFor = 1
def __init__(self, cr):
DistributedHouseItem.DistributedHouseItem.__init__(self, cr)
DistributedSmoothNode.DistributedSmoothNode.__init__(self, cr)
NodePath.__init__(self)
self.localControl = True
self.__broadcastFrequency = 0.25
self.__adjustStarted = 0
self.furnitureMgr = None
self.transmitRelativeTo = None
return
def generate(self):
DistributedHouseItem.DistributedHouseItem.generate(self)
DistributedSmoothNode.DistributedSmoothNode.generate(self)
self.__taskName = self.taskName('sendRequestPosHpr')
def announceGenerate(self):
DistributedHouseItem.DistributedHouseItem.announceGenerate(self)
DistributedSmoothNode.DistributedSmoothNode.announceGenerate(self)
self.load()
def load(self):
pass
def disable(self):
taskMgr.remove(self.__taskName)
self.stopSmooth()
self.furnitureMgr.dfitems.remove(self)
self.furnitureMgr = None
DistributedHouseItem.DistributedHouseItem.disable(self)
DistributedSmoothNode.DistributedSmoothNode.disable(self)
return
def delete(self):
self.removeNode()
del self.item
DistributedHouseItem.DistributedHouseItem.delete(self)
DistributedSmoothNode.DistributedSmoothNode.delete(self)
def setItem(self, furnitureMgrId, blob):
self.furnitureMgr = self.cr.doId2do[furnitureMgrId]
self.furnitureMgr.dfitems.append(self)
self.item = CatalogItem.getItem(blob, store=CatalogItem.Customization)
self.assign(self.loadModel())
interior = self.furnitureMgr.getInteriorObject()
self.reparentTo(interior.interior)
def loadModel(self):
return self.item.loadModel()
def startAdjustPosHpr(self):
if self.__adjustStarted:
return
self.__adjustStarted = 1
self.clearSmoothing()
taskMgr.remove(self.__taskName)
posHpr = self.__getPosHpr()
self.__oldPosHpr = posHpr
self.sendRequestPosHpr(0, *posHpr)
taskMgr.doMethodLater(self.__broadcastFrequency, self.__posHprBroadcast, self.__taskName)
def __posHprBroadcast(self, task):
posHpr = self.__getPosHpr()
if not self.__comparePosHpr(posHpr, self.__oldPosHpr, 0.1):
pass
else:
self.__oldPosHpr = posHpr
self.sendRequestPosHpr(0, *posHpr)
taskMgr.doMethodLater(self.__broadcastFrequency, self.__posHprBroadcast, self.__taskName)
return Task.done
def stopAdjustPosHpr(self):
if not self.__adjustStarted:
return
self.__adjustStarted = 0
taskMgr.remove(self.__taskName)
posHpr = self.__getPosHpr()
self.sendRequestPosHpr(1, *posHpr)
del self.__oldPosHpr
def sendRequestPosHpr(self, final, x, y, z, h, p, r):
t = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('requestPosHpr', (final,
x,
y,
z,
h,
p,
r,
t))
def setMode(self, mode, avId):
if mode == HouseGlobals.FURNITURE_MODE_START:
if avId != base.localAvatar.getDoId():
self.startSmooth()
elif mode == HouseGlobals.FURNITURE_MODE_STOP:
if avId != base.localAvatar.getDoId():
self.stopSmooth()
elif mode == HouseGlobals.FURNITURE_MODE_OFF:
pass
else:
self.notify.warning('setMode: unknown mode: %s avId: %s' % (mode, avId))
def __getPosHpr(self):
if self.transmitRelativeTo == None:
pos = self.getPos()
hpr = self.getHpr()
else:
pos = self.getPos(self.transmitRelativeTo)
hpr = self.getHpr(self.transmitRelativeTo)
return (pos[0],
pos[1],
pos[2],
hpr[0],
hpr[1],
hpr[2])
def __comparePosHpr(self, a, b, threshold):
for i in xrange(len(a)):
if abs(a[i] - b[i]) >= threshold:
return 1
return 0
| mit | 3,282,787,121,327,398,400 | 33.294964 | 119 | 0.643382 | false |
willi-kappler/Snowball_Python | modules/gimmick.py | 1 | 13945 | import random
import pygame
import gfxobject
class Gimmick:
"Class for the funny gimmicks. Note that it doesn't use any of the gfxobject classes"
def __init__(self, screen, level):
self.screen = screen
self.level = level
self.tux = gfxobject.GFXObject(screen, level, level.playerGfx, 0, 0)
self.firedevil = gfxobject.GFXObject(screen, level, level.firedevilGfx, 0, 0)
self.ghost = gfxobject.GFXObject(screen, level, level.ghostGfx, 0, 0)
self.skull = gfxobject.GFXObject(screen, level, level.skullGfx, 0, 0)
self.zombie = gfxobject.GFXObject(screen, level, level.zombieGfx, 0, 0)
self.doSequence = [None, self.seq1, self.seq2, self.seq3, self.seq4]
self.prepareSequence = [None, self.prepareSeq1, self.prepareSeq2, self.prepareSeq3, self.prepareSeq4]
self.sequence = 0
self.time = 0
def prepareSeq1(self):
self.tux.x = -32
self.tux.y = 416
self.tux.animList1 = [(10, 80), (11, 80), (12, 80), (13, 80)]
self.tux.animList2 = [(14,80)]
self.tux.animList = self.tux.animList1
self.tux.animFrame = 0
self.tux.mode = 0
self.tux.step = 20
self.firedevil.x = -96
self.firedevil.y = 416
self.firedevil.animList = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.firedevil.animFrame = 0
self.firedevil.mode = 0
self.ground = [1,0,0,0,0,0,0]
def prepareSeq2(self):
self.skull.x = 512
self.skull.y = 416
self.skull.animList1 = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.skull.animList2 = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
self.skull.mode = 0
self.skull.step = 40
self.ghost.x = 640
self.ghost.y = 416
self.ghost.animList1 = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.ghost.animList2 = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.ghost.animList = self.ghost.animList1
self.ghost.animFrame = 0
self.ghost.mode = 0
self.ground = []
self.ground.append([self.level.greenBottle, self.level.doorClosed, 0, 0, 0, 0])
self.ground.append([2, 2, 2, 2, 2, 2])
def prepareSeq3(self):
self.skull.x = 544
self.skull.y = 416
self.skull.animList1 = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.skull.animList2 = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
self.skull.mode = 0
self.zombie.x = 0
self.zombie.y = 416
self.zombie.animList1 = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.zombie.animList2 = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.zombie.animList = self.zombie.animList2
self.zombie.animFrame = 0
self.zombie.mode = 0
self.leftGround = []
self.leftGround.append([1, 1, 1, self.level.spikeNo + 2, 0])
self.leftGround.append([0, 0, 0, self.level.doorOpened + 1, self.level.heartNo + 1])
self.leftGround.append([2, 2, 2, self.level.spikeNo + 1, 2])
self.ground = []
self.ground.append([0, 0, self.level.switchMin])
self.ground.append([2, 2, 2])
def prepareSeq4(self):
pass
def seq1(self): # tux and firedevil
if self.tux.mode == 0:
self.tux.x += 2
self.tux.step -= 1
if self.tux.step == 0:
self.tux.mode = 1
self.tux.animList = self.tux.animList2
self.tux.animFrame = 0
self.tux.step = 8
self.ground[(self.tux.x / 32) + 1] = 1 # put blocks on ground
self.firedevil.mode = 1
if self.firedevil.x > 32:
self.ground[(self.firedevil.x / 32) - 1] = 0 # take blocks from ground
if self.tux.x > 160:
self.tux.mode = 2
self.firedevil.mode = 1
self.tux.animList = [(0, 80)] # turn around
self.tux.animFrame = 0
self.tux.step = 32 # and wait
self.firedevil.animList = [(5, 80)]
self.firedevil.animFrame = 0
elif self.tux.mode == 1:
self.tux.step -= 1 # wait and bow
if self.tux.step == 0:
self.tux.mode = 0
self.tux.animList = self.tux.animList1 # move on
self.tux.animFrame = 0
self.tux.step = 16
self.firedevil.mode = 0
elif self.tux.mode == 2:
self.tux.step -= 1 # wait
if self.tux.step == 0:
self.tux.mode = 3
self.tux.step = 32
elif self.tux.mode == 3:
self.screen.blit(self.level.frontGfx[self.level.heartNo], (140, 400)) # show heart
self.tux.step -= 1 # and wait
if self.tux.step == 0:
self.tux.mode = 4
self.tux.animList = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.tux.animFrame = 0
self.firedevil.mode = 2
self.firedevil.animList = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.firedevil.animFrame = 0
elif self.tux.mode == 4:
self.tux.x -= 6 # you know what you want.... go and get it!
if self.tux.x > 0:
self.ground[(self.tux.x / 32) + 1] = 0 # remove blocks
else:
self.sequence = 0
self.time = pygame.time.get_ticks()
self.tux.go()
if self.firedevil.mode == 0:
self.firedevil.x += 2
elif self.firedevil.mode == 2:
self.firedevil.x -= 6 # run for your life!
if self.firedevil.x > 32:
self.ground[(self.firedevil.x / 32) - 1] = 1 # put blocks
self.firedevil.go()
for i in range(6):
if self.ground[i] == 1:
self.screen.blit(self.level.frontGfx[1], (i*32, 448))
def seq2(self): # skull and ghost
for i in range(6):
if self.ground[0][i] > 0:
self.screen.blit(self.level.frontGfx[self.ground[0][i]], (448 + (i*32), 416))
if self.ground[1][i] > 0:
self.screen.blit(self.level.frontGfx[self.ground[1][i]], (448 + (i*32), 448))
if self.skull.mode == 1:
self.skull.step -= 1 # wait in front of the door
if self.skull.step == 0:
self.skull.mode = 2
self.skull.animList = self.skull.animList2 # turn around
self.skull.animFrame = 0
elif self.skull.mode == 2:
self.skull.x += 2 # move to ghost
if self.skull.x >= 580:
self.skull.mode = 3
self.skull.step = 40
elif self.skull.mode == 3:
self.skull.step -= 1 # babble a lot of stuff meaningless stuff to ghost
if self.skull.step == 0:
self.skull.mode = 0 # wait
self.skull.animList = [(1, 80)] # turn around
self.skull.animFrame = 0
self.ghost.mode = 2
elif self.skull.mode == 4:
self.skull.step -= 1 # babble to ghost again...
if self.skull.step == 0:
self.skull.mode = 0 # wait
self.skull.animList = [(1, 80)]
self.skull.animFrame = 0
self.ghost.mode = 4
self.ghost.animList = self.ghost.animList1
self.ghost.animFrame = 0
elif self.skull.mode == 5:
self.skull.x -= 2
if self.skull.x <= 540:
self.ground[0][3] = 0
self.skull.mode = 0
self.skull.go()
if self.ghost.mode == 0:
self.ghost.x -= 2 # sneek in
if self.ghost.x <= 608:
self.ghost.mode = 1
self.skull.mode = 1
elif self.ghost.mode == 2:
self.ghost.x -= 2 # move to door
if self.ghost.x <= 512:
self.ghost.mode = 3 # wait
self.skull.step = 30
elif self.ghost.mode == 3:
self.skull.step -= 1
if self.skull.step == 0:
self.ghost.mode = 1 # wait
self.ghost.animList = self.ghost.animList2 # turn around
self.ghost.animFrame = 0
self.skull.step = 30
self.skull.mode = 4
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
elif self.ghost.mode == 4:
self.ghost.x -= 2
if self.ghost.x <= 448:
self.ghost.mode = 5
self.skull.step = 30
elif self.ghost.mode == 5:
self.skull.step -= 1
if self.skull.step == 0:
self.ground[0][0] = 0
self.ghost.mode = 6
self.ghost.animList = self.ghost.animList2
self.ghost.animFrame = 0
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
elif self.ghost.mode == 6:
self.ghost.x += 2
if self.ghost.x >= 548:
self.ground[0][3] = self.level.greenBottle
self.ghost.mode = 7
self.skull.mode = 5
elif self.ghost.mode == 7:
self.ghost.x += 2
if self.ghost.x >= 640:
self.sequence = 0
self.time = pygame.time.get_ticks()
self.ghost.go()
def seq3(self): # zombie and skull
for i in range(5):
if self.leftGround[0][i] > 0:
self.screen.blit(self.level.frontGfx[self.leftGround[0][i]], (i*32, 384))
if self.leftGround[1][i] > 0:
self.screen.blit(self.level.frontGfx[self.leftGround[1][i]], (i*32, 416))
if self.leftGround[2][i] > 0:
self.screen.blit(self.level.frontGfx[self.leftGround[2][i]], (i*32, 448))
for i in range(3):
if self.ground[0][i] > 0:
self.screen.blit(self.level.frontGfx[self.ground[0][i]], (544 + (i*32), 416))
if self.ground[1][i] > 0:
self.screen.blit(self.level.frontGfx[self.ground[1][i]], (544 + (i*32), 448))
if self.skull.mode == 1: # fast! got to the switch! the stupid zombie is comming...
self.skull.x += 2
if self.skull.x >= 580:
self.skull.mode = 2
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
self.leftGround[1][3] = self.level.redOn
if self.skull.mode == 2: # go back and enjoy the show
self.skull.x -= 2
if self.skull.x <= 544:
self.skull.mode = 0 # wait
if self.skull.mode == 3: # one more time...
self.skull.x += 2
if self.skull.x >= 580:
self.skull.mode = 2
self.skull.animList = self.skull.animList1
self.skull.animFrame = 0
self.leftGround[1][3] = self.level.doorOpened + 1
self.skull.go()
if self.zombie.mode == 0: # nice shiny coin! zombie want coin! zombie must have coin!
self.zombie.x += 1
if self.zombie.x == 32:
self.skull.mode = 1
self.skull.animList = self.skull.animList2
self.skull.animFrame = 0
elif self.zombie.x == 64:
self.zombie.mode = 1
self.zombie.animList = self.zombie.animList1
self.zombie.animFrame = 0
elif self.zombie.mode == 1: # arrgh! turn around and move back... zombie no coin...
self.zombie.x -= 1
if self.zombie.x == 32:
self.skull.mode = 3
self.skull.animList = self.skull.animList2
self.skull.animFrame = 0
elif self.zombie.x == 0:
self.zombie.mode = 2
self.zombie.animList = self.zombie.animList2
self.zombie.animFrame = 0
elif self.zombie.mode == 2: # coin there again! zombie want coin!
self.zombie.x += 1
if self.zombie.x == 32:
self.skull.mode = 1
self.skull.animList = self.skull.animList2
self.skull.animFrame = 0
elif self.zombie.x == 64:
self.zombie.mode = 3
self.zombie.animList = self.zombie.animList1
self.zombie.animFrame = 0
elif self.zombie.mode == 3: # zombie go home... zombie no want play...
self.zombie.x -= 1
if self.zombie.x == 32:
self.zombie.mode = 4
self.zombie.animList = [(5, 80)]
self.zombie.animFrame = 0
self.zombie.step = 30
elif self.zombie.mode == 4: # coin ?? coin ?? no coin....
self.zombie.step -= 1
if self.zombie.step == 0:
self.zombie.mode = 5
self.zombie.animList = self.zombie.animList1
self.zombie.animFrame = 0
elif self.zombie.mode == 5: # zombie away...
self.zombie.x -= 1
if self.zombie.x == -16:
self.sequence = 0
self.time = pygame.time.get_ticks()
self.zombie.go()
def seq4(self):
pass
def reset(self):
self.sequence = 0
self.time = pygame.time.get_ticks()
def go(self):
if self.sequence == 0:
if pygame.time.get_ticks() > self.time + 5000:
self.time = pygame.time.get_ticks()
self.sequence = random.randint(0, 3)
if self.sequence > 0:
self.prepareSequence[self.sequence]()
else:
self.doSequence[self.sequence]()
| gpl-2.0 | -7,936,916,952,836,410,000 | 38.616477 | 109 | 0.502617 | false |
googleapis/googleapis-gen | google/cloud/websecurityscanner/v1/websecurityscanner-v1-py/google/cloud/websecurityscanner_v1/types/finding_type_stats.py | 1 | 1388 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.websecurityscanner.v1',
manifest={
'FindingTypeStats',
},
)
class FindingTypeStats(proto.Message):
r"""A FindingTypeStats resource represents stats regarding a
specific FindingType of Findings under a given ScanRun.
Attributes:
finding_type (str):
Output only. The finding type associated with
the stats.
finding_count (int):
Output only. The count of findings belonging
to this finding type.
"""
finding_type = proto.Field(
proto.STRING,
number=1,
)
finding_count = proto.Field(
proto.INT32,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
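# Construction sketch (illustrative field values): like other proto-plus
# messages, FindingTypeStats accepts its fields as keyword arguments and
# exposes them as attributes, e.g.
#   stats = FindingTypeStats(finding_type='XSS_CALLBACK', finding_count=4)
#   stats.finding_count  # -> 4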
| apache-2.0 | -8,043,455,129,311,050,000 | 26.76 | 74 | 0.668588 | false |
robotika/husky | ros/tcpros.py | 1 | 3927 | """
Parsing TCPROS communication protocol
usage:
./tcpros <log file to replay>
"""
import socket
import struct
import sys
import datetime
def prefix4BytesLen( s ):
"adding ROS length"
return struct.pack("I", len(s)) + s
def splitLenStr( data ):
ret = []
while len(data) >= 4:
size = struct.unpack( "I", data[:4] )[0]
data = data[4:]
ret.append( data[:size] )
data = data[size:]
return ret
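# Worked example (illustrative) of the length-prefixed framing the two helpers
# above implement; struct uses the native byte order, little-endian on x86:
#   prefix4BytesLen("abc") -> "\x03\x00\x00\x00abc"
#   splitLenStr(prefix4BytesLen("abc") + prefix4BytesLen("de")) -> ["abc", "de"]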
class LoggedStream:
def __init__( self, readFn=None, writeFn=None, prefix="" ):
self.readFn = readFn
self.writeFn = writeFn
dt = datetime.datetime.now()
self.filename = prefix + dt.strftime("%y%m%d_%H%M%S.log")
self.logFile = open( "logs/" + self.filename, "wb" )
print "LogIt:", self.filename
self.buf = ""
def readMsg( self ):
try:
data = self.readFn( 4096 )
except socket.timeout as e:
assert False, e # it should contain partial data
except socket.error as (errno, errStr):
assert errno in [10035,11], (errno, errStr)
# Windows 'A non-blocking socket operation could not be completed immediately'
# Linux (11, 'Resource temporarily unavailable')
data = ""
self.logFile.write( data )
self.logFile.flush()
self.buf += data
if len(self.buf) >= 4:
num = struct.unpack("I", self.buf[:4])[0]
if len(self.buf) >= 4 + num:
data = self.buf[4:4+num]
self.buf = self.buf[4+num:]
return data
return None
def writeMsg( self, msg ):
data = prefix4BytesLen( msg )
self.logFile.write( data )
self.logFile.flush()
self.writeFn( data )
class ReplayLoggedStream:
def __init__( self, filename, assertWrite ):
self.filename = filename
self.logFile = open( self.filename, "rb" )
print "ReplayLog:", self.filename
self.assertWrite = assertWrite
def readMsg( self ):
data = self.logFile.read( 4 )
if len(data) >= 4:
num = struct.unpack("I", data[:4])[0]
return self.logFile.read( num )
return None
def writeMsg( self, msg ):
data = prefix4BytesLen( msg )
ref = self.logFile.read( len(data) )
if self.assertWrite:
assert data == ref, (ref,data)
class Tcpros:
"TCPROS communication protocol"
def __init__( self, readFn=None, readMsgFn=None, verbose=False ):
self.readFn = readFn
self.readMsgFn = readMsgFn
self.topicType = None
self.verbose = verbose
def readMsg( self ):
"skip very first message - topic description"
if self.topicType == None:
m = self._readMsg()
if m != None:
self.topicType = splitLenStr(m)
if self.verbose:
for s in self.topicType:
print s
return self._readMsg()
return None
return self._readMsg()
def _readMsg( self ):
if self.readMsgFn:
return self.readMsgFn()
data = self.readFn(4)
if len(data) == 0:
return None
size = struct.unpack("I", data)[0]
return self.readFn( size )
if __name__ == "__main__":
from msgs import *
if len(sys.argv) < 2:
print __doc__
sys.exit(1)
t = Tcpros( open(sys.argv[1], "rb").read )
while 1:
m = t.readMsg()
if m == None:
break
# print t.parseImu(m)
# print t.parseEncoders(m)
# print t.parsePower(m)
# print parseString(m)
# print parseJoy(m)
print parseSafety(m)
print "--------------"
#-------------------------------------------------------------------
# vim: expandtab sw=4 ts=4
| mit | 264,870,536,879,751,900 | 27.664234 | 93 | 0.520499 | false |
rht/zulip | zerver/tests/test_narrow.py | 1 | 137439 | # -*- coding: utf-8 -*-
from django.db import connection
from django.test import TestCase, override_settings
from sqlalchemy.sql import (
and_, select, column, table,
)
from sqlalchemy.sql.elements import ClauseElement
from zerver.models import (
Realm, Subscription, Recipient, Stream,
get_display_recipient, get_personal_recipient, get_realm, get_stream,
UserMessage, get_stream_recipient, Message
)
from zerver.lib.actions import (
do_set_realm_property,
do_deactivate_user,
create_streams_if_needed
)
from zerver.lib.message import (
MessageDict,
)
from zerver.lib.narrow import (
build_narrow_filter,
is_web_public_compatible,
)
from zerver.lib.request import JsonableError
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.test_helpers import (
POSTRequestMock,
get_user_messages, queries_captured,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.topic import (
MATCH_TOPIC,
TOPIC_NAME,
)
from zerver.lib.topic_mutes import (
set_topic_mutes,
)
from zerver.lib.types import DisplayRecipientT
from zerver.views.messages import (
exclude_muting_conditions,
get_messages_backend, ok_to_include_history,
NarrowBuilder, BadNarrowOperator, Query,
post_process_limited_query,
find_first_unread_anchor,
LARGER_THAN_MAX_MESSAGE_ID,
)
from typing import Dict, Mapping, List, Sequence, Tuple, Union, Any, Optional
import mock
import os
import ujson
def get_sqlalchemy_sql(query: ClauseElement) -> str:
dialect = get_sqlalchemy_connection().dialect
comp = query.compile(dialect=dialect)
return str(comp)
def get_sqlalchemy_query_params(query: ClauseElement) -> Dict[str, object]:
dialect = get_sqlalchemy_connection().dialect
comp = query.compile(dialect=dialect)
return comp.params
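# For instance (illustrative), compiling the raw query built in
# NarrowBuilderTest.setUp below:
#   get_sqlalchemy_sql(select([column("id")], None, table("zerver_message")))
# yields roughly "SELECT id FROM zerver_message" for the active dialect.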
def get_recipient_id_for_stream_name(realm: Realm, stream_name: str) -> str:
stream = get_stream(stream_name, realm)
return get_stream_recipient(stream.id).id
def mute_stream(realm: Realm, user_profile: str, stream_name: str) -> None:
stream = get_stream(stream_name, realm)
recipient = get_stream_recipient(stream.id)
subscription = Subscription.objects.get(recipient=recipient, user_profile=user_profile)
subscription.is_muted = True
subscription.save()
def first_visible_id_as(message_id: int) -> Any:
return mock.patch(
'zerver.views.messages.get_first_visible_message_id',
return_value=message_id,
)
class NarrowBuilderTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.realm = get_realm('zulip')
self.user_profile = self.example_user('hamlet')
self.builder = NarrowBuilder(self.user_profile, column('id'))
self.raw_query = select([column("id")], None, table("zerver_message"))
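    # Each test below feeds a single narrow term to NarrowBuilder.add_term via
    # _build_query() and asserts on the WHERE fragment (and optionally the
    # bound parameters) of the resulting SQL via _do_add_term_test().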
def test_add_term_using_not_defined_operator(self) -> None:
term = dict(operator='not-defined', operand='any')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_stream_operator(self) -> None:
term = dict(operator='stream', operand='Scotland')
self._do_add_term_test(term, 'WHERE recipient_id = %(recipient_id_1)s')
def test_add_term_using_stream_operator_and_negated(self) -> None: # NEGATED
term = dict(operator='stream', operand='Scotland', negated=True)
self._do_add_term_test(term, 'WHERE recipient_id != %(recipient_id_1)s')
def test_add_term_using_stream_operator_and_non_existing_operand_should_raise_error(
self) -> None: # NEGATED
term = dict(operator='stream', operand='NonExistingStream')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_is_operator_and_private_operand(self) -> None:
term = dict(operator='is', operand='private')
self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) != %(param_1)s')
def test_add_term_using_streams_operator_and_invalid_operand_should_raise_error(
self) -> None: # NEGATED
term = dict(operator='streams', operand='invalid_operands')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_streams_operator_and_public_stream_operand(self) -> None:
term = dict(operator='streams', operand='public')
self._do_add_term_test(term, 'WHERE recipient_id IN (%(recipient_id_1)s, %(recipient_id_2)s, %(recipient_id_3)s, %(recipient_id_4)s, %(recipient_id_5)s)')
# Add new streams
stream_dicts = [
{
"name": "publicstream",
"description": "Public stream with public history"
},
{
"name": "privatestream",
"description": "Private stream with non-public history",
"invite_only": True
},
{
"name": "privatewithhistory",
"description": "Private stream with public history",
"invite_only": True,
"history_public_to_subscribers": True
}
] # type: List[Mapping[str, Any]]
realm = get_realm('zulip')
created, existing = create_streams_if_needed(realm, stream_dicts)
self.assertEqual(len(created), 3)
self.assertEqual(len(existing), 0)
# Number of recipient ids will increase by 1 and not 3
self._do_add_term_test(term, 'WHERE recipient_id IN (%(recipient_id_1)s, %(recipient_id_2)s, %(recipient_id_3)s, %(recipient_id_4)s, %(recipient_id_5)s, %(recipient_id_6)s)')
def test_add_term_using_streams_operator_and_public_stream_operand_negated(self) -> None:
term = dict(operator='streams', operand='public', negated=True)
self._do_add_term_test(term, 'WHERE recipient_id NOT IN (%(recipient_id_1)s, %(recipient_id_2)s, %(recipient_id_3)s, %(recipient_id_4)s, %(recipient_id_5)s)')
# Add new streams
stream_dicts = [
{
"name": "publicstream",
"description": "Public stream with public history"
},
{
"name": "privatestream",
"description": "Private stream with non-public history",
"invite_only": True
},
{
"name": "privatewithhistory",
"description": "Private stream with public history",
"invite_only": True,
"history_public_to_subscribers": True
}
] # type: List[Mapping[str, Any]]
realm = get_realm('zulip')
created, existing = create_streams_if_needed(realm, stream_dicts)
self.assertEqual(len(created), 3)
self.assertEqual(len(existing), 0)
# Number of recipient ids will increase by 1 and not 3
self._do_add_term_test(term, 'WHERE recipient_id NOT IN (%(recipient_id_1)s, %(recipient_id_2)s, %(recipient_id_3)s, %(recipient_id_4)s, %(recipient_id_5)s, %(recipient_id_6)s)')
def test_add_term_using_is_operator_private_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='is', operand='private', negated=True)
self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) = %(param_1)s')
def test_add_term_using_is_operator_and_non_private_operand(self) -> None:
for operand in ['starred', 'mentioned', 'alerted']:
term = dict(operator='is', operand=operand)
self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) != %(param_1)s')
def test_add_term_using_is_operator_and_unread_operand(self) -> None:
term = dict(operator='is', operand='unread')
self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) = %(param_1)s')
def test_add_term_using_is_operator_and_unread_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='is', operand='unread', negated=True)
self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) != %(param_1)s')
def test_add_term_using_is_operator_non_private_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='is', operand='starred', negated=True)
where_clause = 'WHERE (flags & %(flags_1)s) = %(param_1)s'
params = dict(
flags_1=UserMessage.flags.starred.mask,
param_1=0
)
self._do_add_term_test(term, where_clause, params)
term = dict(operator='is', operand='alerted', negated=True)
where_clause = 'WHERE (flags & %(flags_1)s) = %(param_1)s'
params = dict(
flags_1=UserMessage.flags.has_alert_word.mask,
param_1=0
)
self._do_add_term_test(term, where_clause, params)
term = dict(operator='is', operand='mentioned', negated=True)
where_clause = 'WHERE NOT ((flags & %(flags_1)s) != %(param_1)s OR (flags & %(flags_2)s) != %(param_2)s)'
params = dict(
flags_1=UserMessage.flags.mentioned.mask,
param_1=0,
flags_2=UserMessage.flags.wildcard_mentioned.mask,
param_2=0
)
self._do_add_term_test(term, where_clause, params)
def test_add_term_using_non_supported_operator_should_raise_error(self) -> None:
term = dict(operator='is', operand='non_supported')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_topic_operator_and_lunch_operand(self) -> None:
term = dict(operator='topic', operand='lunch')
self._do_add_term_test(term, 'WHERE upper(subject) = upper(%(param_1)s)')
def test_add_term_using_topic_operator_lunch_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='topic', operand='lunch', negated=True)
self._do_add_term_test(term, 'WHERE upper(subject) != upper(%(param_1)s)')
def test_add_term_using_topic_operator_and_personal_operand(self) -> None:
term = dict(operator='topic', operand='personal')
self._do_add_term_test(term, 'WHERE upper(subject) = upper(%(param_1)s)')
def test_add_term_using_topic_operator_personal_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='topic', operand='personal', negated=True)
self._do_add_term_test(term, 'WHERE upper(subject) != upper(%(param_1)s)')
def test_add_term_using_sender_operator(self) -> None:
term = dict(operator='sender', operand=self.example_email("othello"))
self._do_add_term_test(term, 'WHERE sender_id = %(param_1)s')
def test_add_term_using_sender_operator_and_negated(self) -> None: # NEGATED
term = dict(operator='sender', operand=self.example_email("othello"), negated=True)
self._do_add_term_test(term, 'WHERE sender_id != %(param_1)s')
def test_add_term_using_sender_operator_with_non_existing_user_as_operand(
self) -> None: # NEGATED
term = dict(operator='sender', operand='[email protected]')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_pm_with_operator_and_not_the_same_user_as_operand(self) -> None:
term = dict(operator='pm-with', operand=self.example_email("othello"))
self._do_add_term_test(term, 'WHERE sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s OR sender_id = %(sender_id_2)s AND recipient_id = %(recipient_id_2)s')
def test_add_term_using_pm_with_operator_not_the_same_user_as_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='pm-with', operand=self.example_email("othello"), negated=True)
self._do_add_term_test(term, 'WHERE NOT (sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s OR sender_id = %(sender_id_2)s AND recipient_id = %(recipient_id_2)s)')
def test_add_term_using_pm_with_operator_the_same_user_as_operand(self) -> None:
term = dict(operator='pm-with', operand=self.example_email("hamlet"))
self._do_add_term_test(term, 'WHERE sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s')
def test_add_term_using_pm_with_operator_the_same_user_as_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='pm-with', operand=self.example_email("hamlet"), negated=True)
self._do_add_term_test(term, 'WHERE NOT (sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s)')
def test_add_term_using_pm_with_operator_and_self_and_user_as_operand(self) -> None:
term = dict(operator='pm-with', operand='[email protected], [email protected]')
self._do_add_term_test(term, 'WHERE sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s OR sender_id = %(sender_id_2)s AND recipient_id = %(recipient_id_2)s')
def test_add_term_using_pm_with_operator_more_than_one_user_as_operand(self) -> None:
term = dict(operator='pm-with', operand='[email protected], [email protected]')
self._do_add_term_test(term, 'WHERE recipient_id = %(recipient_id_1)s')
def test_add_term_using_pm_with_operator_self_and_user_as_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='pm-with', operand='[email protected], [email protected]', negated=True)
self._do_add_term_test(term, 'WHERE NOT (sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s OR sender_id = %(sender_id_2)s AND recipient_id = %(recipient_id_2)s)')
def test_add_term_using_pm_with_operator_more_than_one_user_as_operand_and_negated(self) -> None:
term = dict(operator='pm-with', operand='[email protected], [email protected]', negated=True)
self._do_add_term_test(term, 'WHERE recipient_id != %(recipient_id_1)s')
def test_add_term_using_pm_with_operator_with_comma_noise(self) -> None:
term = dict(operator='pm-with', operand=' ,,, ,,, ,')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_pm_with_operator_with_existing_and_non_existing_user_as_operand(self) -> None:
term = dict(operator='pm-with', operand='[email protected],[email protected]')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_id_operator(self) -> None:
term = dict(operator='id', operand=555)
self._do_add_term_test(term, 'WHERE id = %(param_1)s')
def test_add_term_using_id_operator_invalid(self) -> None:
term = dict(operator='id', operand='')
self.assertRaises(BadNarrowOperator, self._build_query, term)
term = dict(operator='id', operand='notanint')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_id_operator_and_negated(self) -> None: # NEGATED
term = dict(operator='id', operand=555, negated=True)
self._do_add_term_test(term, 'WHERE id != %(param_1)s')
def test_add_term_using_group_pm_operator_and_not_the_same_user_as_operand(self) -> None:
        # Test without any such group PM threads existing
term = dict(operator='group-pm-with', operand=self.example_email("othello"))
self._do_add_term_test(term, 'WHERE 1 != 1')
# Test with at least one such group PM thread existing
self.send_huddle_message(self.user_profile.email, [self.example_email("othello"),
self.example_email("cordelia")])
term = dict(operator='group-pm-with', operand=self.example_email("othello"))
self._do_add_term_test(term, 'WHERE recipient_id IN (%(recipient_id_1)s)')
def test_add_term_using_group_pm_operator_not_the_same_user_as_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='group-pm-with', operand=self.example_email("othello"), negated=True)
self._do_add_term_test(term, 'WHERE 1 = 1')
def test_add_term_using_group_pm_operator_with_non_existing_user_as_operand(self) -> None:
term = dict(operator='group-pm-with', operand='[email protected]')
self.assertRaises(BadNarrowOperator, self._build_query, term)
@override_settings(USING_PGROONGA=False)
def test_add_term_using_search_operator(self) -> None:
term = dict(operator='search', operand='"french fries"')
self._do_add_term_test(term, 'WHERE (content ILIKE %(content_1)s OR subject ILIKE %(subject_1)s) AND (search_tsvector @@ plainto_tsquery(%(param_4)s, %(param_5)s))')
@override_settings(USING_PGROONGA=False)
def test_add_term_using_search_operator_and_negated(
self) -> None: # NEGATED
term = dict(operator='search', operand='"french fries"', negated=True)
self._do_add_term_test(term, 'WHERE NOT (content ILIKE %(content_1)s OR subject ILIKE %(subject_1)s) AND NOT (search_tsvector @@ plainto_tsquery(%(param_4)s, %(param_5)s))')
@override_settings(USING_PGROONGA=True)
def test_add_term_using_search_operator_pgroonga(self) -> None:
term = dict(operator='search', operand='"french fries"')
self._do_add_term_test(term, 'WHERE search_pgroonga &@~ escape_html(%(escape_html_1)s)')
@override_settings(USING_PGROONGA=True)
def test_add_term_using_search_operator_and_negated_pgroonga(
self) -> None: # NEGATED
term = dict(operator='search', operand='"french fries"', negated=True)
self._do_add_term_test(term, 'WHERE NOT (search_pgroonga &@~ escape_html(%(escape_html_1)s))')
def test_add_term_using_has_operator_and_attachment_operand(self) -> None:
term = dict(operator='has', operand='attachment')
self._do_add_term_test(term, 'WHERE has_attachment')
def test_add_term_using_has_operator_attachment_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='has', operand='attachment', negated=True)
self._do_add_term_test(term, 'WHERE NOT has_attachment')
def test_add_term_using_has_operator_and_image_operand(self) -> None:
term = dict(operator='has', operand='image')
self._do_add_term_test(term, 'WHERE has_image')
def test_add_term_using_has_operator_image_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='has', operand='image', negated=True)
self._do_add_term_test(term, 'WHERE NOT has_image')
def test_add_term_using_has_operator_and_link_operand(self) -> None:
term = dict(operator='has', operand='link')
self._do_add_term_test(term, 'WHERE has_link')
def test_add_term_using_has_operator_link_operand_and_negated(
self) -> None: # NEGATED
term = dict(operator='has', operand='link', negated=True)
self._do_add_term_test(term, 'WHERE NOT has_link')
def test_add_term_using_has_operator_non_supported_operand_should_raise_error(self) -> None:
term = dict(operator='has', operand='non_supported')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_in_operator(self) -> None:
mute_stream(self.realm, self.user_profile, 'Verona')
term = dict(operator='in', operand='home')
self._do_add_term_test(term, 'WHERE recipient_id NOT IN (%(recipient_id_1)s)')
def test_add_term_using_in_operator_and_negated(self) -> None:
# negated = True should not change anything
mute_stream(self.realm, self.user_profile, 'Verona')
term = dict(operator='in', operand='home', negated=True)
self._do_add_term_test(term, 'WHERE recipient_id NOT IN (%(recipient_id_1)s)')
def test_add_term_using_in_operator_and_all_operand(self) -> None:
mute_stream(self.realm, self.user_profile, 'Verona')
term = dict(operator='in', operand='all')
query = self._build_query(term)
self.assertEqual(get_sqlalchemy_sql(query), 'SELECT id \nFROM zerver_message')
def test_add_term_using_in_operator_all_operand_and_negated(self) -> None:
# negated = True should not change anything
mute_stream(self.realm, self.user_profile, 'Verona')
term = dict(operator='in', operand='all', negated=True)
query = self._build_query(term)
self.assertEqual(get_sqlalchemy_sql(query), 'SELECT id \nFROM zerver_message')
def test_add_term_using_in_operator_and_not_defined_operand(self) -> None:
term = dict(operator='in', operand='not_defined')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_near_operator(self) -> None:
term = dict(operator='near', operand='operand')
query = self._build_query(term)
self.assertEqual(get_sqlalchemy_sql(query), 'SELECT id \nFROM zerver_message')
def _do_add_term_test(self, term: Dict[str, Any], where_clause: str,
params: Optional[Dict[str, Any]]=None) -> None:
query = self._build_query(term)
if params is not None:
actual_params = get_sqlalchemy_query_params(query)
self.assertEqual(actual_params, params)
self.assertIn(where_clause, get_sqlalchemy_sql(query))
def _build_query(self, term: Dict[str, Any]) -> Query:
return self.builder.add_term(self.raw_query, term)
class NarrowLibraryTest(TestCase):
def test_build_narrow_filter(self) -> None:
fixtures_path = os.path.join(os.path.dirname(__file__),
'fixtures/narrow.json')
        with open(fixtures_path, 'r') as f:
            scenarios = ujson.loads(f.read())
        self.assertEqual(len(scenarios), 9)
for scenario in scenarios:
narrow = scenario['narrow']
accept_events = scenario['accept_events']
reject_events = scenario['reject_events']
narrow_filter = build_narrow_filter(narrow)
for e in accept_events:
self.assertTrue(narrow_filter(e))
for e in reject_events:
self.assertFalse(narrow_filter(e))
def test_build_narrow_filter_invalid(self) -> None:
with self.assertRaises(JsonableError):
build_narrow_filter(["invalid_operator", "operand"])
def test_is_web_public_compatible(self) -> None:
self.assertTrue(is_web_public_compatible([]))
self.assertTrue(is_web_public_compatible([{"operator": "has",
"operand": "attachment"}]))
self.assertTrue(is_web_public_compatible([{"operator": "has",
"operand": "image"}]))
self.assertTrue(is_web_public_compatible([{"operator": "search",
"operand": "magic"}]))
self.assertTrue(is_web_public_compatible([{"operator": "near",
"operand": "15"}]))
self.assertTrue(is_web_public_compatible([{"operator": "id",
"operand": "15"},
{"operator": "has",
"operand": "attachment"}]))
self.assertTrue(is_web_public_compatible([{"operator": "sender",
"operand": "[email protected]"}]))
self.assertFalse(is_web_public_compatible([{"operator": "pm-with",
"operand": "[email protected]"}]))
self.assertFalse(is_web_public_compatible([{"operator": "group-pm-with",
"operand": "[email protected]"}]))
self.assertTrue(is_web_public_compatible([{"operator": "stream",
"operand": "Denmark"}]))
self.assertTrue(is_web_public_compatible([{"operator": "stream",
"operand": "Denmark"},
{"operator": "topic",
"operand": "logic"}]))
self.assertFalse(is_web_public_compatible([{"operator": "is",
"operand": "starred"}]))
self.assertFalse(is_web_public_compatible([{"operator": "is",
"operand": "private"}]))
self.assertTrue(is_web_public_compatible([{"operator": "streams",
"operand": "public"}]))
# Malformed input not allowed
self.assertFalse(is_web_public_compatible([{"operator": "has"}]))
class IncludeHistoryTest(ZulipTestCase):
def test_ok_to_include_history(self) -> None:
user_profile = self.example_user("hamlet")
self.make_stream('public_stream', realm=user_profile.realm)
# Negated stream searches should not include history.
narrow = [
dict(operator='stream', operand='public_stream', negated=True),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
# streams:public searches should include history for non-guest members.
narrow = [
dict(operator='streams', operand='public'),
]
self.assertTrue(ok_to_include_history(narrow, user_profile))
# Negated -streams:public searches should not include history.
narrow = [
dict(operator='streams', operand='public', negated=True),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
# Definitely forbid seeing history on private streams.
self.make_stream('private_stream', realm=user_profile.realm, invite_only=True)
subscribed_user_profile = self.example_user("cordelia")
self.subscribe(subscribed_user_profile, 'private_stream')
narrow = [
dict(operator='stream', operand='private_stream'),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
# Verify that with stream.history_public_to_subscribers, subscribed
# users can access history.
self.make_stream('private_stream_2', realm=user_profile.realm,
invite_only=True, history_public_to_subscribers=True)
subscribed_user_profile = self.example_user("cordelia")
self.subscribe(subscribed_user_profile, 'private_stream_2')
narrow = [
dict(operator='stream', operand='private_stream_2'),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
self.assertTrue(ok_to_include_history(narrow, subscribed_user_profile))
# History doesn't apply to PMs.
narrow = [
dict(operator='is', operand='private'),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
# History doesn't apply to unread messages.
narrow = [
dict(operator='is', operand='unread'),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
# If we are looking for something like starred messages, there is
# no point in searching historical messages.
narrow = [
dict(operator='stream', operand='public_stream'),
dict(operator='is', operand='starred'),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
# No point in searching history for is operator even if included with
# streams:public
narrow = [
dict(operator='streams', operand='public'),
dict(operator='is', operand='mentioned'),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
narrow = [
dict(operator='streams', operand='public'),
dict(operator='is', operand='unread'),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
narrow = [
dict(operator='streams', operand='public'),
dict(operator='is', operand='alerted'),
]
self.assertFalse(ok_to_include_history(narrow, user_profile))
# simple True case
narrow = [
dict(operator='stream', operand='public_stream'),
]
self.assertTrue(ok_to_include_history(narrow, user_profile))
narrow = [
dict(operator='stream', operand='public_stream'),
dict(operator='topic', operand='whatever'),
dict(operator='search', operand='needle in haystack'),
]
self.assertTrue(ok_to_include_history(narrow, user_profile))
# Tests for guest user
guest_user_profile = self.example_user("polonius")
# Using 'Cordelia' to compare between a guest and a normal user
subscribed_user_profile = self.example_user("cordelia")
# streams:public searches should not include history for guest members.
narrow = [
dict(operator='streams', operand='public'),
]
self.assertFalse(ok_to_include_history(narrow, guest_user_profile))
# Guest user can't access public stream
self.subscribe(subscribed_user_profile, 'public_stream_2')
narrow = [
dict(operator='stream', operand='public_stream_2'),
]
self.assertFalse(ok_to_include_history(narrow, guest_user_profile))
self.assertTrue(ok_to_include_history(narrow, subscribed_user_profile))
# Definitely, a guest user can't access the unsubscribed private stream
self.subscribe(subscribed_user_profile, 'private_stream_3')
narrow = [
dict(operator='stream', operand='private_stream_3'),
]
self.assertFalse(ok_to_include_history(narrow, guest_user_profile))
self.assertTrue(ok_to_include_history(narrow, subscribed_user_profile))
# Guest user can access (history of) subscribed private streams
self.subscribe(guest_user_profile, 'private_stream_4')
self.subscribe(subscribed_user_profile, 'private_stream_4')
narrow = [
dict(operator='stream', operand='private_stream_4'),
]
self.assertTrue(ok_to_include_history(narrow, guest_user_profile))
self.assertTrue(ok_to_include_history(narrow, subscribed_user_profile))
class PostProcessTest(ZulipTestCase):
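    # post_process_limited_query trims raw query rows down to the requested
    # window around the anchor (dropping rows below first_visible_message_id)
    # and reports found_anchor/found_oldest/found_newest/history_limited;
    # verify() below drives it with synthetic row ids and checks all four
    # flags plus the surviving rows.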
def test_basics(self) -> None:
def verify(in_ids: List[int],
num_before: int,
num_after: int,
first_visible_message_id: int,
anchor: int,
anchored_to_left: bool,
anchored_to_right: bool,
out_ids: List[int],
found_anchor: bool,
found_oldest: bool,
found_newest: bool,
history_limited: bool) -> None:
in_rows = [[row_id] for row_id in in_ids]
out_rows = [[row_id] for row_id in out_ids]
info = post_process_limited_query(
rows=in_rows,
num_before=num_before,
num_after=num_after,
anchor=anchor,
anchored_to_left=anchored_to_left,
anchored_to_right=anchored_to_right,
first_visible_message_id=first_visible_message_id,
)
self.assertEqual(info['rows'], out_rows)
self.assertEqual(info['found_anchor'], found_anchor)
self.assertEqual(info['found_newest'], found_newest)
self.assertEqual(info['found_oldest'], found_oldest)
self.assertEqual(info['history_limited'], history_limited)
# typical 2-sided query, with a bunch of tests for different
# values of first_visible_message_id.
anchor = 10
verify(
in_ids=[8, 9, anchor, 11, 12],
num_before=2, num_after=2,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[8, 9, 10, 11, 12],
found_anchor=True, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[8, 9, anchor, 11, 12],
num_before=2, num_after=2,
first_visible_message_id=8,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[8, 9, 10, 11, 12],
found_anchor=True, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[8, 9, anchor, 11, 12],
num_before=2, num_after=2,
first_visible_message_id=9,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[9, 10, 11, 12],
found_anchor=True, found_oldest=True,
found_newest=False, history_limited=True,
)
verify(
in_ids=[8, 9, anchor, 11, 12],
num_before=2, num_after=2,
first_visible_message_id=10,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[10, 11, 12],
found_anchor=True, found_oldest=True,
found_newest=False, history_limited=True,
)
verify(
in_ids=[8, 9, anchor, 11, 12],
num_before=2, num_after=2,
first_visible_message_id=11,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[11, 12],
found_anchor=False, found_oldest=True,
found_newest=False, history_limited=True,
)
verify(
in_ids=[8, 9, anchor, 11, 12],
num_before=2, num_after=2,
first_visible_message_id=12,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[12],
found_anchor=False, found_oldest=True,
found_newest=True, history_limited=True,
)
verify(
in_ids=[8, 9, anchor, 11, 12],
num_before=2, num_after=2,
first_visible_message_id=13,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[],
found_anchor=False, found_oldest=True,
found_newest=True, history_limited=True,
)
# typical 2-sided query missing anchor and grabbing an extra row
anchor = 10
verify(
in_ids=[7, 9, 11, 13, 15],
num_before=2, num_after=2,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
first_visible_message_id=0,
out_ids=[7, 9, 11, 13],
found_anchor=False, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[7, 9, 11, 13, 15],
num_before=2, num_after=2,
first_visible_message_id=10,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[11, 13],
found_anchor=False, found_oldest=True,
found_newest=False, history_limited=True,
)
verify(
in_ids=[7, 9, 11, 13, 15],
num_before=2, num_after=2,
first_visible_message_id=9,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[9, 11, 13],
found_anchor=False, found_oldest=True,
found_newest=False, history_limited=True,
)
# 2-sided query with old anchor
anchor = 100
verify(
in_ids=[50, anchor, 150, 200],
num_before=2, num_after=2,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[50, 100, 150, 200],
found_anchor=True, found_oldest=True,
found_newest=False, history_limited=False,
)
verify(
in_ids=[50, anchor, 150, 200],
num_before=2, num_after=2,
first_visible_message_id=anchor,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[100, 150, 200],
found_anchor=True, found_oldest=True,
found_newest=False, history_limited=True,
)
# 2-sided query with new anchor
anchor = 900
verify(
in_ids=[700, 800, anchor, 1000],
num_before=2, num_after=2,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[700, 800, 900, 1000],
found_anchor=True, found_oldest=False,
found_newest=True, history_limited=False,
)
verify(
in_ids=[700, 800, anchor, 1000],
num_before=2, num_after=2,
first_visible_message_id=anchor,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[900, 1000],
found_anchor=True, found_oldest=True,
found_newest=True, history_limited=True,
)
# left-sided query with old anchor
anchor = 100
verify(
in_ids=[50, anchor],
num_before=2, num_after=0,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[50, 100],
found_anchor=True, found_oldest=True,
found_newest=False, history_limited=False,
)
verify(
in_ids=[50, anchor],
num_before=2, num_after=0,
first_visible_message_id=anchor,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[100],
found_anchor=True, found_oldest=True,
found_newest=False, history_limited=True,
)
# left-sided query with new anchor
anchor = 900
verify(
in_ids=[700, 800, anchor],
num_before=2, num_after=0,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[700, 800, 900],
found_anchor=True, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[700, 800, anchor],
num_before=2, num_after=0,
first_visible_message_id=anchor,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[900],
found_anchor=True, found_oldest=True,
found_newest=False, history_limited=True,
)
# left-sided query with new anchor and extra row
anchor = 900
verify(
in_ids=[600, 700, 800, anchor],
num_before=2, num_after=0,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[700, 800, 900],
found_anchor=True, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[600, 700, 800, anchor],
num_before=2, num_after=0,
first_visible_message_id=anchor,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[900],
found_anchor=True, found_oldest=True,
found_newest=False, history_limited=True,
)
# left-sided query anchored to the right
anchor = None
verify(
in_ids=[900, 1000],
num_before=2, num_after=0,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=True,
out_ids=[900, 1000],
found_anchor=False, found_oldest=False,
found_newest=True, history_limited=False,
)
verify(
in_ids=[900, 1000],
num_before=2, num_after=0,
first_visible_message_id=1000,
anchor=anchor, anchored_to_left=False, anchored_to_right=True,
out_ids=[1000],
found_anchor=False, found_oldest=True,
found_newest=True, history_limited=True,
)
verify(
in_ids=[900, 1000],
num_before=2, num_after=0,
first_visible_message_id=1100,
anchor=anchor, anchored_to_left=False, anchored_to_right=True,
out_ids=[],
found_anchor=False, found_oldest=True,
found_newest=True, history_limited=True,
)
# right-sided query with old anchor
anchor = 100
verify(
in_ids=[anchor, 200, 300, 400],
num_before=0, num_after=2,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[100, 200, 300],
found_anchor=True, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[anchor, 200, 300, 400],
num_before=0, num_after=2,
first_visible_message_id=anchor,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[100, 200, 300],
found_anchor=True, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[anchor, 200, 300, 400],
num_before=0, num_after=2,
first_visible_message_id=300,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[300, 400],
found_anchor=False, found_oldest=False,
            # BUG: history_limited should be True here.
found_newest=False, history_limited=False,
)
# right-sided query with new anchor
anchor = 900
verify(
in_ids=[anchor, 1000],
num_before=0, num_after=2,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[900, 1000],
found_anchor=True, found_oldest=False,
found_newest=True, history_limited=False,
)
verify(
in_ids=[anchor, 1000],
num_before=0, num_after=2,
first_visible_message_id=anchor,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[900, 1000],
found_anchor=True, found_oldest=False,
found_newest=True, history_limited=False,
)
# right-sided query with non-matching anchor
anchor = 903
verify(
in_ids=[1000, 1100, 1200],
num_before=0, num_after=2,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[1000, 1100],
found_anchor=False, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[1000, 1100, 1200],
num_before=0, num_after=2,
first_visible_message_id=anchor,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[1000, 1100],
found_anchor=False, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[1000, 1100, 1200],
num_before=0, num_after=2,
first_visible_message_id=1000,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[1000, 1100],
found_anchor=False, found_oldest=False,
found_newest=False, history_limited=False,
)
verify(
in_ids=[1000, 1100, 1200],
num_before=0, num_after=2,
first_visible_message_id=1100,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[1100, 1200],
found_anchor=False, found_oldest=False,
            # BUG: history_limited should be True here.
found_newest=False, history_limited=False,
)
# targeted query that finds row
anchor = 1000
verify(
in_ids=[1000],
num_before=0, num_after=0,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[1000],
found_anchor=True, found_oldest=False,
found_newest=False, history_limited=False
)
verify(
in_ids=[1000],
num_before=0, num_after=0,
first_visible_message_id=anchor,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[1000],
found_anchor=True, found_oldest=False,
found_newest=False, history_limited=False
)
verify(
in_ids=[1000],
num_before=0, num_after=0,
first_visible_message_id=1100,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[],
found_anchor=False, found_oldest=False,
found_newest=False, history_limited=False,
)
# targeted query that finds nothing
anchor = 903
verify(
in_ids=[],
num_before=0, num_after=0,
first_visible_message_id=0,
anchor=anchor, anchored_to_left=False, anchored_to_right=False,
out_ids=[],
found_anchor=False, found_oldest=False,
found_newest=False, history_limited=False
)
class GetOldMessagesTest(ZulipTestCase):
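    # get_and_check_messages issues GET /json/messages with the given parameter
    # overrides, asserts the request succeeds and that every returned message
    # carries the expected fields, and returns the parsed response body.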
def get_and_check_messages(self,
modified_params: Dict[str, Union[str, int]],
**kwargs: Any) -> Dict[str, Any]:
post_params = {"anchor": 1, "num_before": 1, "num_after": 1} # type: Dict[str, Union[str, int]]
post_params.update(modified_params)
payload = self.client_get("/json/messages", dict(post_params),
**kwargs)
self.assert_json_success(payload)
self.assertEqual(set(payload["Cache-Control"].split(", ")),
{"must-revalidate", "no-store", "no-cache", "max-age=0"})
result = ujson.loads(payload.content)
self.assertIn("messages", result)
self.assertIsInstance(result["messages"], list)
for message in result["messages"]:
for field in ("content", "content_type", "display_recipient",
"avatar_url", "recipient_id", "sender_full_name",
"sender_short_name", "timestamp", "reactions"):
self.assertIn(field, message)
return result
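    # message_visibility_test fetches the narrow twice: once normally, and once
    # with get_first_visible_message_id patched to message_ids[pivot_index],
    # checking that only messages from that pivot onwards are returned when
    # history is limited.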
def message_visibility_test(self, narrow: List[Dict[str, str]],
message_ids: List[int], pivot_index: int) -> None:
num_before = len(message_ids)
post_params = dict(narrow=ujson.dumps(narrow), num_before=num_before,
num_after=0, anchor=LARGER_THAN_MAX_MESSAGE_ID)
payload = self.client_get("/json/messages", dict(post_params))
self.assert_json_success(payload)
result = ujson.loads(payload.content)
self.assertEqual(len(result["messages"]), len(message_ids))
for message in result["messages"]:
assert(message["id"] in message_ids)
post_params.update({"num_before": len(message_ids[pivot_index:])})
with first_visible_id_as(message_ids[pivot_index]):
payload = self.client_get("/json/messages", dict(post_params))
self.assert_json_success(payload)
result = ujson.loads(payload.content)
self.assertEqual(len(result["messages"]), len(message_ids[pivot_index:]))
for message in result["messages"]:
assert(message["id"] in message_ids)
def get_query_ids(self) -> Dict[str, Union[int, str]]:
hamlet_user = self.example_user('hamlet')
othello_user = self.example_user('othello')
query_ids = {} # type: Dict[str, Union[int, str]]
scotland_stream = get_stream('Scotland', hamlet_user.realm)
query_ids['scotland_recipient'] = get_stream_recipient(scotland_stream.id).id
query_ids['hamlet_id'] = hamlet_user.id
query_ids['othello_id'] = othello_user.id
query_ids['hamlet_recipient'] = get_personal_recipient(hamlet_user.id).id
query_ids['othello_recipient'] = get_personal_recipient(othello_user.id).id
recipients = Recipient.objects.filter(
type=Recipient.STREAM,
type_id__in=Stream.objects.filter(realm=hamlet_user.realm, invite_only=False),
).values('id').order_by('id')
query_ids['public_streams_recipents'] = ", ".join(str(r['id']) for r in recipients)
return query_ids
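    # These recipient/user ids are meant to be interpolated into the expected
    # SQL snippets that the query-generation tests further down in this class
    # compare against captured queries.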
def test_content_types(self) -> None:
"""
        Test that `/json/messages` returns the correct content_type for both
        values of apply_markdown.
"""
self.login(self.example_email("hamlet"))
def get_content_type(apply_markdown: bool) -> str:
req = dict(
apply_markdown=ujson.dumps(apply_markdown),
) # type: Dict[str, Any]
result = self.get_and_check_messages(req)
message = result['messages'][0]
return message['content_type']
self.assertEqual(
get_content_type(apply_markdown=False),
'text/x-markdown',
)
self.assertEqual(
get_content_type(apply_markdown=True),
'text/html',
)
def test_successful_get_messages_reaction(self) -> None:
"""
Test old `/json/messages` returns reactions.
"""
self.login(self.example_email("hamlet"))
messages = self.get_and_check_messages(dict())
message_id = messages['messages'][0]['id']
self.login(self.example_email("othello"))
reaction_name = 'thumbs_up'
reaction_info = {
'emoji_name': reaction_name
}
url = '/json/messages/{}/reactions'.format(message_id)
payload = self.client_post(url, reaction_info)
self.assert_json_success(payload)
self.login(self.example_email("hamlet"))
messages = self.get_and_check_messages({})
message_to_assert = None
for message in messages['messages']:
if message['id'] == message_id:
message_to_assert = message
break
assert(message_to_assert is not None)
self.assertEqual(len(message_to_assert['reactions']), 1)
self.assertEqual(message_to_assert['reactions'][0]['emoji_name'],
reaction_name)
def test_successful_get_messages(self) -> None:
"""
A call to GET /json/messages with valid parameters returns a list of
messages.
"""
self.login(self.example_email("hamlet"))
self.get_and_check_messages(dict())
# We have to support the legacy tuple style while there are old
# clients around, which might include third party home-grown bots.
self.get_and_check_messages(dict(narrow=ujson.dumps([['pm-with', self.example_email("othello")]])))
self.get_and_check_messages(dict(narrow=ujson.dumps([dict(operator='pm-with', operand=self.example_email("othello"))])))
def test_client_avatar(self) -> None:
"""
The client_gravatar flag determines whether we send avatar_url.
"""
hamlet = self.example_user('hamlet')
self.login(hamlet.email)
self.send_personal_message(hamlet.email, self.example_email("iago"))
result = self.get_and_check_messages({})
message = result['messages'][0]
self.assertIn('gravatar.com', message['avatar_url'])
result = self.get_and_check_messages(dict(client_gravatar=ujson.dumps(True)))
message = result['messages'][0]
self.assertEqual(message['avatar_url'], None)
# Now verify client_gravatar doesn't run with EMAIL_ADDRESS_VISIBILITY_ADMINS
do_set_realm_property(hamlet.realm, "email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
result = self.get_and_check_messages(dict(client_gravatar=ujson.dumps(True)))
message = result['messages'][0]
self.assertIn('gravatar.com', message['avatar_url'])
def test_get_messages_with_narrow_pm_with(self) -> None:
"""
A request for old messages with a narrow by pm-with only returns
conversations with that user.
"""
me = self.example_email('hamlet')
def dr_emails(dr: DisplayRecipientT) -> str:
assert isinstance(dr, list)
return ','.join(sorted(set([r['email'] for r in dr] + [me])))
def dr_ids(dr: DisplayRecipientT) -> List[int]:
assert isinstance(dr, list)
return list(sorted(set([r['id'] for r in dr] + [self.example_user('hamlet').id])))
self.send_personal_message(me, self.example_email("iago"))
self.send_huddle_message(
me,
[self.example_email("iago"), self.example_email("cordelia")],
)
# Send a 1:1 and group PM containing Aaron.
        # Then deactivate aaron to verify that the pm-with narrow also
        # includes messages from deactivated users.
self.send_personal_message(me, self.example_email("aaron"))
self.send_huddle_message(
me,
[self.example_email("iago"), self.example_email("aaron")],
)
aaron = self.example_user("aaron")
do_deactivate_user(aaron)
self.assertFalse(aaron.is_active)
personals = [m for m in get_user_messages(self.example_user('hamlet'))
if not m.is_stream_message()]
for personal in personals:
emails = dr_emails(get_display_recipient(personal.recipient))
self.login(me)
narrow = [dict(operator='pm-with', operand=emails)] # type: List[Dict[str, Any]]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
for message in result["messages"]:
self.assertEqual(dr_emails(message['display_recipient']), emails)
            # Check that passing ids is consistent with passing emails as operand
ids = dr_ids(get_display_recipient(personal.recipient))
narrow = [dict(operator='pm-with', operand=ids)]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
for message in result["messages"]:
self.assertEqual(dr_emails(message['display_recipient']), emails)
def test_get_visible_messages_with_narrow_pm_with(self) -> None:
me = self.example_email('hamlet')
self.login(me)
self.subscribe(self.example_user("hamlet"), 'Scotland')
message_ids = []
for i in range(5):
message_ids.append(self.send_personal_message(me, self.example_email("iago")))
narrow = [dict(operator='pm-with', operand=self.example_email("iago"))]
self.message_visibility_test(narrow, message_ids, 2)
def test_get_messages_with_narrow_group_pm_with(self) -> None:
"""
A request for old messages with a narrow by group-pm-with only returns
group-private conversations with that user.
"""
me = self.example_email("hamlet")
matching_message_ids = []
matching_message_ids.append(
self.send_huddle_message(
me,
[
self.example_email("iago"),
self.example_email("cordelia"),
self.example_email("othello"),
],
),
)
matching_message_ids.append(
self.send_huddle_message(
me,
[
self.example_email("cordelia"),
self.example_email("othello"),
],
),
)
non_matching_message_ids = []
non_matching_message_ids.append(
self.send_personal_message(me, self.example_email("cordelia")),
)
non_matching_message_ids.append(
self.send_huddle_message(
me,
[
self.example_email("iago"),
self.example_email("othello"),
],
),
)
non_matching_message_ids.append(
self.send_huddle_message(
self.example_email("cordelia"),
[
self.example_email("iago"),
self.example_email("othello"),
],
),
)
self.login(me)
test_operands = [self.example_email("cordelia"), self.example_user("cordelia").id]
for operand in test_operands:
narrow = [dict(operator='group-pm-with', operand=operand)]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
for message in result["messages"]:
self.assertIn(message["id"], matching_message_ids)
self.assertNotIn(message["id"], non_matching_message_ids)
def test_get_visible_messages_with_narrow_group_pm_with(self) -> None:
me = self.example_email('hamlet')
self.login(me)
message_ids = []
message_ids.append(
self.send_huddle_message(
me,
[
self.example_email("iago"),
self.example_email("cordelia"),
self.example_email("othello"),
],
),
)
message_ids.append(
self.send_huddle_message(
me,
[
self.example_email("cordelia"),
self.example_email("othello"),
],
),
)
message_ids.append(
self.send_huddle_message(
me,
[
self.example_email("cordelia"),
self.example_email("iago"),
],
),
)
narrow = [dict(operator='group-pm-with', operand=self.example_email("cordelia"))]
self.message_visibility_test(narrow, message_ids, 1)
def test_include_history(self) -> None:
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
stream_name = 'test stream'
self.subscribe(cordelia, stream_name)
old_message_id = self.send_stream_message(cordelia.email, stream_name, content='foo')
self.subscribe(hamlet, stream_name)
content = 'hello @**King Hamlet**'
new_message_id = self.send_stream_message(cordelia.email, stream_name, content=content)
self.login(hamlet.email)
narrow = [
dict(operator='stream', operand=stream_name)
]
req = dict(
narrow=ujson.dumps(narrow),
anchor=LARGER_THAN_MAX_MESSAGE_ID,
num_before=100,
num_after=100,
)
payload = self.client_get('/json/messages', req)
self.assert_json_success(payload)
result = ujson.loads(payload.content)
messages = result['messages']
self.assertEqual(len(messages), 2)
for message in messages:
if message['id'] == old_message_id:
old_message = message
elif message['id'] == new_message_id:
new_message = message
self.assertEqual(old_message['flags'], ['read', 'historical'])
self.assertEqual(new_message['flags'], ['mentioned'])
def test_get_messages_with_narrow_stream(self) -> None:
"""
A request for old messages with a narrow by stream only returns
messages for that stream.
"""
self.login(self.example_email('hamlet'))
# We need to subscribe to a stream and then send a message to
# it to ensure that we actually have a stream message in this
# narrow view.
self.subscribe(self.example_user("hamlet"), 'Scotland')
self.send_stream_message(self.example_email("hamlet"), "Scotland")
messages = get_user_messages(self.example_user('hamlet'))
stream_messages = [msg for msg in messages if msg.is_stream_message()]
stream_name = get_display_recipient(stream_messages[0].recipient)
assert isinstance(stream_name, str)
stream_id = get_stream(stream_name, stream_messages[0].get_realm()).id
stream_recipient_id = stream_messages[0].recipient.id
for operand in [stream_name, stream_id]:
narrow = [dict(operator='stream', operand=operand)]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
for message in result["messages"]:
self.assertEqual(message["type"], "stream")
self.assertEqual(message["recipient_id"], stream_recipient_id)
def test_get_visible_messages_with_narrow_stream(self) -> None:
self.login(self.example_email('hamlet'))
self.subscribe(self.example_user("hamlet"), 'Scotland')
message_ids = []
for i in range(5):
message_ids.append(self.send_stream_message(self.example_email("iago"), "Scotland"))
narrow = [dict(operator='stream', operand="Scotland")]
self.message_visibility_test(narrow, message_ids, 2)
def test_get_messages_with_narrow_stream_mit_unicode_regex(self) -> None:
"""
        A request for old messages for a user in the mit.edu realm with a unicode
        stream name should have the stream name correctly escaped in the database query.
"""
self.login(self.mit_email("starnine"), realm=get_realm("zephyr"))
        # We need to subscribe to a stream and then send a message to
# it to ensure that we actually have a stream message in this
# narrow view.
lambda_stream_name = u"\u03bb-stream"
stream = self.subscribe(self.mit_user("starnine"), lambda_stream_name)
self.assertTrue(stream.is_in_zephyr_realm)
lambda_stream_d_name = u"\u03bb-stream.d"
self.subscribe(self.mit_user("starnine"), lambda_stream_d_name)
self.send_stream_message(self.mit_email("starnine"), u"\u03bb-stream", sender_realm="zephyr")
self.send_stream_message(self.mit_email("starnine"), u"\u03bb-stream.d", sender_realm="zephyr")
narrow = [dict(operator='stream', operand=u'\u03bb-stream')]
result = self.get_and_check_messages(dict(num_after=2,
narrow=ujson.dumps(narrow)),
subdomain="zephyr")
messages = get_user_messages(self.mit_user("starnine"))
stream_messages = [msg for msg in messages if msg.is_stream_message()]
self.assertEqual(len(result["messages"]), 2)
for i, message in enumerate(result["messages"]):
self.assertEqual(message["type"], "stream")
stream_id = stream_messages[i].recipient.id
self.assertEqual(message["recipient_id"], stream_id)
def test_get_messages_with_narrow_topic_mit_unicode_regex(self) -> None:
"""
        A request for old messages for a user in the mit.edu realm with a unicode
        topic name should have the topic correctly escaped in the database query.
"""
mit_user_profile = self.mit_user("starnine")
email = mit_user_profile.email
self.login(email, realm=get_realm("zephyr"))
        # We need to subscribe to a stream and then send a message to
# it to ensure that we actually have a stream message in this
# narrow view.
self.subscribe(mit_user_profile, "Scotland")
self.send_stream_message(email, "Scotland", topic_name=u"\u03bb-topic",
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u"\u03bb-topic.d",
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u"\u03bb-topic.d.d",
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u"\u03bb-topic.d.d.d",
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u"\u03bb-topic.d.d.d.d",
sender_realm="zephyr")
narrow = [dict(operator='topic', operand=u'\u03bb-topic')]
result = self.get_and_check_messages(
dict(num_after=100, narrow=ujson.dumps(narrow)),
subdomain="zephyr")
messages = get_user_messages(mit_user_profile)
stream_messages = [msg for msg in messages if msg.is_stream_message()]
self.assertEqual(len(result["messages"]), 5)
for i, message in enumerate(result["messages"]):
self.assertEqual(message["type"], "stream")
stream_id = stream_messages[i].recipient.id
self.assertEqual(message["recipient_id"], stream_id)
def test_get_messages_with_narrow_topic_mit_personal(self) -> None:
"""
We handle .d grouping for MIT realm personal messages correctly.
"""
mit_user_profile = self.mit_user("starnine")
email = mit_user_profile.email
        # We need to subscribe to a stream and then send a message to
# it to ensure that we actually have a stream message in this
# narrow view.
self.login(email, realm=mit_user_profile.realm)
self.subscribe(mit_user_profile, "Scotland")
self.send_stream_message(email, "Scotland", topic_name=u".d.d",
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u"PERSONAL",
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u'(instance "").d',
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u".d.d.d",
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u"personal.d",
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u'(instance "")',
sender_realm="zephyr")
self.send_stream_message(email, "Scotland", topic_name=u".d.d.d.d",
sender_realm="zephyr")
narrow = [dict(operator='topic', operand=u'personal.d.d')]
result = self.get_and_check_messages(
dict(num_before=50,
num_after=50,
narrow=ujson.dumps(narrow)),
subdomain="zephyr")
messages = get_user_messages(mit_user_profile)
stream_messages = [msg for msg in messages if msg.is_stream_message()]
self.assertEqual(len(result["messages"]), 7)
for i, message in enumerate(result["messages"]):
self.assertEqual(message["type"], "stream")
stream_id = stream_messages[i].recipient.id
self.assertEqual(message["recipient_id"], stream_id)
def test_get_messages_with_narrow_sender(self) -> None:
"""
A request for old messages with a narrow by sender only returns
messages sent by that person.
"""
self.login(self.example_email("hamlet"))
# We need to send a message here to ensure that we actually
# have a stream message in this narrow view.
self.send_stream_message(self.example_email("hamlet"), "Scotland")
self.send_stream_message(self.example_email("othello"), "Scotland")
self.send_personal_message(self.example_email("othello"), self.example_email("hamlet"))
self.send_stream_message(self.example_email("iago"), "Scotland")
test_operands = [self.example_email("othello"), self.example_user("othello").id]
for operand in test_operands:
narrow = [dict(operator='sender', operand=operand)]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
for message in result["messages"]:
self.assertEqual(message["sender_email"], self.example_email("othello"))
def _update_tsvector_index(self) -> None:
# We use brute force here and update our text search index
# for the entire zerver_message table (which is small in test
# mode). In production there is an async process which keeps
# the search index up to date.
with connection.cursor() as cursor:
cursor.execute("""
UPDATE zerver_message SET
search_tsvector = to_tsvector('zulip.english_us_search',
subject || rendered_content)
""")
@override_settings(USING_PGROONGA=False)
def test_messages_in_narrow(self) -> None:
email = self.example_email("cordelia")
self.login(email)
def send(content: str) -> int:
msg_id = self.send_stream_message(
sender_email=email,
stream_name="Verona",
content=content,
)
return msg_id
good_id = send('KEYWORDMATCH and should work')
bad_id = send('no match')
msg_ids = [good_id, bad_id]
send('KEYWORDMATCH but not in msg_ids')
self._update_tsvector_index()
narrow = [
dict(operator='search', operand='KEYWORDMATCH'),
]
raw_params = dict(msg_ids=msg_ids, narrow=narrow)
params = {k: ujson.dumps(v) for k, v in raw_params.items()}
result = self.client_get('/json/messages/matches_narrow', params)
self.assert_json_success(result)
messages = result.json()['messages']
self.assertEqual(len(list(messages.keys())), 1)
message = messages[str(good_id)]
self.assertEqual(message['match_content'],
u'<p><span class="highlight">KEYWORDMATCH</span> and should work</p>')
@override_settings(USING_PGROONGA=False)
def test_get_messages_with_search(self) -> None:
self.login(self.example_email("cordelia"))
messages_to_search = [
('breakfast', 'there are muffins in the conference room'),
('lunch plans', 'I am hungry!'),
('meetings', 'discuss lunch after lunch'),
('meetings', 'please bring your laptops to take notes'),
('dinner', 'Anybody staying late tonight?'),
('urltest', 'https://google.com'),
(u'日本', u'こんに ちは 。 今日は いい 天気ですね。'),
(u'日本', u'今朝はごはんを食べました。'),
(u'日本', u'昨日、日本 のお菓子を送りました。'),
('english', u'I want to go to 日本!'),
]
next_message_id = self.get_last_message().id + 1
for topic, content in messages_to_search:
self.send_stream_message(
sender_email=self.example_email("cordelia"),
stream_name="Verona",
content=content,
topic_name=topic,
)
self._update_tsvector_index()
narrow = [
dict(operator='sender', operand=self.example_email("cordelia")),
dict(operator='search', operand='lunch'),
]
result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_before=0,
num_after=10,
)) # type: Dict[str, Any]
self.assertEqual(len(result['messages']), 2)
messages = result['messages']
narrow = [dict(operator='search', operand='https://google.com')]
link_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_before=0,
num_after=10,
)) # type: Dict[str, Any]
self.assertEqual(len(link_search_result['messages']), 1)
self.assertEqual(link_search_result['messages'][0]['match_content'],
'<p><a href="https://google.com" target="_blank" title="https://google.com">https://<span class="highlight">google.com</span></a></p>')
meeting_message = [m for m in messages if m[TOPIC_NAME] == 'meetings'][0]
self.assertEqual(
meeting_message[MATCH_TOPIC],
'meetings')
self.assertEqual(
meeting_message['match_content'],
'<p>discuss <span class="highlight">lunch</span> after ' +
'<span class="highlight">lunch</span></p>')
meeting_message = [m for m in messages if m[TOPIC_NAME] == 'lunch plans'][0]
self.assertEqual(
meeting_message[MATCH_TOPIC],
'<span class="highlight">lunch</span> plans')
self.assertEqual(
meeting_message['match_content'],
'<p>I am hungry!</p>')
# Should not crash when multiple search operands are present
multi_search_narrow = [
dict(operator='search', operand='discuss'),
dict(operator='search', operand='after'),
]
multi_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(multi_search_narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
)) # type: Dict[str, Any]
self.assertEqual(len(multi_search_result['messages']), 1)
self.assertEqual(multi_search_result['messages'][0]['match_content'], '<p><span class="highlight">discuss</span> lunch <span class="highlight">after</span> lunch</p>')
# Test searching in messages with unicode characters
narrow = [
dict(operator='search', operand=u'日本'),
]
result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
))
self.assertEqual(len(result['messages']), 4)
messages = result['messages']
japanese_message = [m for m in messages if m[TOPIC_NAME] == u'日本'][-1]
self.assertEqual(
japanese_message[MATCH_TOPIC],
u'<span class="highlight">日本</span>')
self.assertEqual(
japanese_message['match_content'],
u'<p>昨日、<span class="highlight">日本</span>' +
u' のお菓子を送りました。</p>')
english_message = [m for m in messages if m[TOPIC_NAME] == 'english'][0]
self.assertEqual(
english_message[MATCH_TOPIC],
'english')
        self.assertEqual(
english_message['match_content'],
u'<p>I want to go to <span class="highlight">日本</span>!</p>')
# Multiple search operands with unicode
multi_search_narrow = [
dict(operator='search', operand='ちは'),
dict(operator='search', operand='今日は'),
]
multi_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(multi_search_narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
))
self.assertEqual(len(multi_search_result['messages']), 1)
self.assertEqual(multi_search_result['messages'][0]['match_content'],
'<p>こんに <span class="highlight">ちは</span> 。 <span class="highlight">今日は</span> いい 天気ですね。</p>')
@override_settings(USING_PGROONGA=False)
def test_get_visible_messages_with_search(self) -> None:
self.login(self.example_email('hamlet'))
self.subscribe(self.example_user("hamlet"), 'Scotland')
messages_to_search = [
("Gryffindor", "Hogwart's house which values courage, bravery, nerve, and chivalry"),
("Hufflepuff", "Hogwart's house which values hard work, patience, justice, and loyalty."),
("Ravenclaw", "Hogwart's house which values intelligence, creativity, learning, and wit"),
("Slytherin", "Hogwart's house which values ambition, cunning, leadership, and resourcefulness"),
]
message_ids = []
for topic, content in messages_to_search:
message_ids.append(self.send_stream_message(self.example_email("iago"), "Scotland",
topic_name=topic, content=content))
self._update_tsvector_index()
narrow = [dict(operator='search', operand="Hogwart's")]
self.message_visibility_test(narrow, message_ids, 2)
@override_settings(USING_PGROONGA=False)
def test_get_messages_with_search_not_subscribed(self) -> None:
"""Verify support for searching a stream you're not subscribed to"""
self.subscribe(self.example_user("hamlet"), "newstream")
self.send_stream_message(
sender_email=self.example_email("hamlet"),
stream_name="newstream",
content="Public special content!",
topic_name="new",
)
self._update_tsvector_index()
self.login(self.example_email("cordelia"))
stream_search_narrow = [
dict(operator='search', operand='special'),
dict(operator='stream', operand='newstream'),
]
stream_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(stream_search_narrow),
anchor=0,
num_after=10,
num_before=10,
)) # type: Dict[str, Any]
self.assertEqual(len(stream_search_result['messages']), 1)
self.assertEqual(stream_search_result['messages'][0]['match_content'],
'<p>Public <span class="highlight">special</span> content!</p>')
@override_settings(USING_PGROONGA=True)
def test_get_messages_with_search_pgroonga(self) -> None:
self.login(self.example_email("cordelia"))
next_message_id = self.get_last_message().id + 1
messages_to_search = [
(u'日本語', u'こんにちは。今日はいい天気ですね。'),
(u'日本語', u'今朝はごはんを食べました。'),
(u'日本語', u'昨日、日本のお菓子を送りました。'),
('english', u'I want to go to 日本!'),
('english', 'Can you speak https://en.wikipedia.org/wiki/Japanese?'),
('english', 'https://google.com'),
('bread & butter', 'chalk & cheese'),
]
for topic, content in messages_to_search:
self.send_stream_message(
sender_email=self.example_email("cordelia"),
stream_name="Verona",
content=content,
topic_name=topic,
)
# We use brute force here and update our text search index
# for the entire zerver_message table (which is small in test
# mode). In production there is an async process which keeps
# the search index up to date.
with connection.cursor() as cursor:
cursor.execute("""
UPDATE zerver_message SET
search_pgroonga = escape_html(subject) || ' ' || rendered_content
""")
narrow = [
dict(operator='search', operand=u'日本'),
]
result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
)) # type: Dict[str, Any]
self.assertEqual(len(result['messages']), 4)
messages = result['messages']
japanese_message = [m for m in messages if m[TOPIC_NAME] == u'日本語'][-1]
self.assertEqual(
japanese_message[MATCH_TOPIC],
u'<span class="highlight">日本</span>語')
self.assertEqual(
japanese_message['match_content'],
u'<p>昨日、<span class="highlight">日本</span>の' +
u'お菓子を送りました。</p>')
english_message = [m for m in messages if m[TOPIC_NAME] == 'english'][0]
self.assertEqual(
english_message[MATCH_TOPIC],
'english')
self.assertIn(
english_message['match_content'],
# NOTE: The whitespace here is off due to a pgroonga bug.
            # This bug is a pgroonga regression and, according to one of the
            # authors, it should be fixed in the next pgroonga release.
[u'<p>I want to go to <span class="highlight">日本</span>!</p>', # This is correct.
u'<p>I want to go to<span class="highlight"> 日本</span>!</p>', ])
# Should not crash when multiple search operands are present
multi_search_narrow = [
dict(operator='search', operand='can'),
dict(operator='search', operand='speak'),
dict(operator='search', operand='wiki'),
]
multi_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(multi_search_narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
)) # type: Dict[str, Any]
self.assertEqual(len(multi_search_result['messages']), 1)
self.assertEqual(multi_search_result['messages'][0]['match_content'],
'<p><span class="highlight">Can</span> you <span class="highlight">speak</span> <a href="https://en.wikipedia.org/wiki/Japanese" target="_blank" title="https://en.wikipedia.org/wiki/Japanese">https://en.<span class="highlight">wiki</span>pedia.org/<span class="highlight">wiki</span>/Japanese</a>?</p>')
# Multiple search operands with unicode
multi_search_narrow = [
dict(operator='search', operand='朝は'),
dict(operator='search', operand='べました'),
]
multi_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(multi_search_narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
))
self.assertEqual(len(multi_search_result['messages']), 1)
self.assertEqual(multi_search_result['messages'][0]['match_content'],
'<p>今<span class="highlight">朝は</span>ごはんを食<span class="highlight">べました</span>。</p>')
narrow = [dict(operator='search', operand='https://google.com')]
link_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
)) # type: Dict[str, Any]
self.assertEqual(len(link_search_result['messages']), 1)
self.assertEqual(link_search_result['messages'][0]['match_content'],
'<p><a href="https://google.com" target="_blank" title="https://google.com"><span class="highlight">https://google.com</span></a></p>')
# Search operands with HTML Special Characters
special_search_narrow = [
dict(operator='search', operand='butter'),
]
special_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(special_search_narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
)) # type: Dict[str, Any]
self.assertEqual(len(special_search_result['messages']), 1)
self.assertEqual(special_search_result['messages'][0][MATCH_TOPIC],
'bread & <span class="highlight">butter</span>')
special_search_narrow = [
dict(operator='search', operand='&'),
]
special_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(special_search_narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
))
self.assertEqual(len(special_search_result['messages']), 1)
self.assertEqual(special_search_result['messages'][0][MATCH_TOPIC],
'bread <span class="highlight">&</span> butter')
self.assertEqual(special_search_result['messages'][0]['match_content'],
'<p>chalk <span class="highlight">&</span> cheese</p>')
def test_messages_in_narrow_for_non_search(self) -> None:
email = self.example_email("cordelia")
self.login(email)
def send(content: str) -> int:
msg_id = self.send_stream_message(
sender_email=email,
stream_name="Verona",
topic_name='test_topic',
content=content,
)
return msg_id
good_id = send('http://foo.com')
bad_id = send('no link here')
msg_ids = [good_id, bad_id]
send('http://bar.com but not in msg_ids')
narrow = [
dict(operator='has', operand='link'),
]
raw_params = dict(msg_ids=msg_ids, narrow=narrow)
params = {k: ujson.dumps(v) for k, v in raw_params.items()}
result = self.client_get('/json/messages/matches_narrow', params)
self.assert_json_success(result)
messages = result.json()['messages']
self.assertEqual(len(list(messages.keys())), 1)
message = messages[str(good_id)]
self.assertIn('a href=', message['match_content'])
self.assertIn('http://foo.com', message['match_content'])
self.assertEqual(message[MATCH_TOPIC], 'test_topic')
def test_get_messages_with_only_searching_anchor(self) -> None:
"""
Test that specifying an anchor but 0 for num_before and num_after
returns at most 1 message.
"""
self.login(self.example_email("cordelia"))
anchor = self.send_stream_message(self.example_email("cordelia"), "Verona")
narrow = [dict(operator='sender', operand=self.example_email("cordelia"))]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow),
anchor=anchor, num_before=0,
num_after=0)) # type: Dict[str, Any]
self.assertEqual(len(result['messages']), 1)
narrow = [dict(operator='is', operand='mentioned')]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow),
anchor=anchor, num_before=0,
num_after=0))
self.assertEqual(len(result['messages']), 0)
def test_get_visible_messages_with_anchor(self) -> None:
def messages_matches_ids(messages: List[Dict[str, Any]], message_ids: List[int]) -> None:
self.assertEqual(len(messages), len(message_ids))
for message in messages:
assert(message["id"] in message_ids)
self.login(self.example_email("hamlet"))
Message.objects.all().delete()
message_ids = []
for i in range(10):
message_ids.append(self.send_stream_message(self.example_email("cordelia"), "Verona"))
data = self.get_messages_response(anchor=message_ids[9], num_before=9, num_after=0)
messages = data['messages']
self.assertEqual(data['found_anchor'], True)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], False)
messages_matches_ids(messages, message_ids)
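        # first_visible_id_as(id) effectively simulates a realm with limited
        # message history: ids below the given value are hidden, which is why
        # found_oldest / history_limited flip to True in the blocks below.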
with first_visible_id_as(message_ids[5]):
data = self.get_messages_response(anchor=message_ids[9], num_before=9, num_after=0)
messages = data['messages']
self.assertEqual(data['found_anchor'], True)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], True)
messages_matches_ids(messages, message_ids[5:])
with first_visible_id_as(message_ids[2]):
data = self.get_messages_response(anchor=message_ids[6], num_before=9, num_after=0)
messages = data['messages']
self.assertEqual(data['found_anchor'], True)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], True)
messages_matches_ids(messages, message_ids[2:7])
with first_visible_id_as(message_ids[9] + 1):
data = self.get_messages_response(anchor=message_ids[9], num_before=9, num_after=0)
messages = data['messages']
self.assert_length(messages, 0)
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], True)
data = self.get_messages_response(anchor=message_ids[5], num_before=0, num_after=5)
messages = data['messages']
self.assertEqual(data['found_anchor'], True)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], True)
self.assertEqual(data['history_limited'], False)
messages_matches_ids(messages, message_ids[5:])
with first_visible_id_as(message_ids[7]):
data = self.get_messages_response(anchor=message_ids[5], num_before=0, num_after=5)
messages = data['messages']
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], True)
self.assertEqual(data['history_limited'], False)
messages_matches_ids(messages, message_ids[7:])
with first_visible_id_as(message_ids[2]):
data = self.get_messages_response(anchor=message_ids[0], num_before=0, num_after=5)
messages = data['messages']
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], False)
messages_matches_ids(messages, message_ids[2:7])
with first_visible_id_as(message_ids[9] + 1):
data = self.get_messages_response(anchor=message_ids[0], num_before=0, num_after=5)
messages = data['messages']
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], True)
self.assertEqual(data['history_limited'], False)
self.assert_length(messages, 0)
# Verify that with anchor=0 we always get found_oldest=True
with first_visible_id_as(0):
data = self.get_messages_response(anchor=0, num_before=0, num_after=5)
messages = data['messages']
messages_matches_ids(messages, message_ids[0:5])
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], False)
# Verify that with anchor=-1 we always get found_oldest=True
# anchor=-1 is arguably invalid input, but it used to be supported
with first_visible_id_as(0):
data = self.get_messages_response(anchor=-1, num_before=0, num_after=5)
messages = data['messages']
messages_matches_ids(messages, message_ids[0:5])
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], False)
        # And anchor='oldest' does the same thing.
with first_visible_id_as(0):
data = self.get_messages_response(anchor='oldest', num_before=0, num_after=5)
messages = data['messages']
messages_matches_ids(messages, message_ids[0:5])
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], False)
data = self.get_messages_response(anchor=message_ids[5], num_before=5, num_after=4)
messages = data['messages']
self.assertEqual(data['found_anchor'], True)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], False)
messages_matches_ids(messages, message_ids)
data = self.get_messages_response(anchor=message_ids[5], num_before=10, num_after=10)
messages = data['messages']
self.assertEqual(data['found_anchor'], True)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], True)
self.assertEqual(data['history_limited'], False)
messages_matches_ids(messages, message_ids)
with first_visible_id_as(message_ids[5]):
data = self.get_messages_response(anchor=message_ids[5], num_before=5, num_after=4)
messages = data['messages']
self.assertEqual(data['found_anchor'], True)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], True)
messages_matches_ids(messages, message_ids[5:])
with first_visible_id_as(message_ids[5]):
data = self.get_messages_response(anchor=message_ids[2], num_before=5, num_after=3)
messages = data['messages']
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], True)
messages_matches_ids(messages, message_ids[5:8])
with first_visible_id_as(message_ids[5]):
data = self.get_messages_response(anchor=message_ids[2], num_before=10, num_after=10)
messages = data['messages']
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], True)
messages_matches_ids(messages, message_ids[5:])
with first_visible_id_as(message_ids[9] + 1):
data = self.get_messages_response(anchor=message_ids[5], num_before=5, num_after=4)
messages = data['messages']
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], True)
self.assertEqual(data['history_limited'], True)
self.assert_length(messages, 0)
with first_visible_id_as(message_ids[5]):
data = self.get_messages_response(anchor=message_ids[5], num_before=0, num_after=0)
messages = data['messages']
self.assertEqual(data['found_anchor'], True)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], False)
messages_matches_ids(messages, message_ids[5:6])
with first_visible_id_as(message_ids[5]):
data = self.get_messages_response(anchor=message_ids[2], num_before=0, num_after=0)
messages = data['messages']
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], False)
self.assertEqual(data['history_limited'], False)
self.assert_length(messages, 0)
# Verify some additional behavior of found_newest.
with first_visible_id_as(0):
data = self.get_messages_response(anchor=LARGER_THAN_MAX_MESSAGE_ID, num_before=5, num_after=0)
messages = data['messages']
self.assert_length(messages, 5)
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], True)
self.assertEqual(data['history_limited'], False)
        # The anchor value of 'newest' behaves just like LARGER_THAN_MAX_MESSAGE_ID.
with first_visible_id_as(0):
data = self.get_messages_response(anchor='newest', num_before=5, num_after=0)
messages = data['messages']
self.assert_length(messages, 5)
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], True)
self.assertEqual(data['history_limited'], False)
with first_visible_id_as(0):
data = self.get_messages_response(anchor=LARGER_THAN_MAX_MESSAGE_ID + 1,
num_before=5, num_after=0)
messages = data['messages']
self.assert_length(messages, 5)
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], False)
self.assertEqual(data['found_newest'], True)
self.assertEqual(data['history_limited'], False)
with first_visible_id_as(0):
data = self.get_messages_response(anchor=LARGER_THAN_MAX_MESSAGE_ID, num_before=20, num_after=0)
messages = data['messages']
self.assert_length(messages, 10)
self.assertEqual(data['found_anchor'], False)
self.assertEqual(data['found_oldest'], True)
self.assertEqual(data['found_newest'], True)
self.assertEqual(data['history_limited'], False)
def test_missing_params(self) -> None:
"""
        num_before and num_after are required POST parameters for
        get_messages.
"""
self.login(self.example_email("hamlet"))
required_args = (("num_before", 1), ("num_after", 1)) # type: Tuple[Tuple[str, int], ...]
for i in range(len(required_args)):
post_params = dict(required_args[:i] + required_args[i + 1:])
result = self.client_get("/json/messages", post_params)
self.assert_json_error(result,
"Missing '%s' argument" % (required_args[i][0],))
def test_get_messages_limits(self) -> None:
"""
A call to GET /json/messages requesting more than
MAX_MESSAGES_PER_FETCH messages returns an error message.
"""
self.login(self.example_email("hamlet"))
result = self.client_get("/json/messages", dict(anchor=1, num_before=3000, num_after=3000))
self.assert_json_error(result, "Too many messages requested (maximum 5000).")
result = self.client_get("/json/messages", dict(anchor=1, num_before=6000, num_after=0))
self.assert_json_error(result, "Too many messages requested (maximum 5000).")
result = self.client_get("/json/messages", dict(anchor=1, num_before=0, num_after=6000))
self.assert_json_error(result, "Too many messages requested (maximum 5000).")
def test_bad_int_params(self) -> None:
"""
        num_before and num_after must be non-negative integers or strings
        that can be converted to non-negative integers.
"""
self.login(self.example_email("hamlet"))
other_params = [("narrow", {}), ("anchor", 0)]
int_params = ["num_before", "num_after"]
bad_types = (False, "", "-1", -1)
for idx, param in enumerate(int_params):
for type in bad_types:
# Rotate through every bad type for every integer
# parameter, one at a time.
post_params = dict(other_params + [(param, type)] +
[(other_param, 0) for other_param in
int_params[:idx] + int_params[idx + 1:]]
)
result = self.client_get("/json/messages", post_params)
self.assert_json_error(result,
"Bad value for '%s': %s" % (param, type))
def test_bad_narrow_type(self) -> None:
"""
narrow must be a list of string pairs.
"""
self.login(self.example_email("hamlet"))
other_params = [("anchor", 0), ("num_before", 0), ("num_after", 0)] # type: List[Tuple[str, Union[int, str, bool]]]
bad_types = (False, 0, '', '{malformed json,',
'{foo: 3}', '[1,2]', '[["x","y","z"]]') # type: Tuple[Union[int, str, bool], ...]
for type in bad_types:
post_params = dict(other_params + [("narrow", type)])
result = self.client_get("/json/messages", post_params)
self.assert_json_error(result,
"Bad value for 'narrow': %s" % (type,))
def test_bad_narrow_operator(self) -> None:
"""
Unrecognized narrow operators are rejected.
"""
self.login(self.example_email("hamlet"))
for operator in ['', 'foo', 'stream:verona', '__init__']:
narrow = [dict(operator=operator, operand='')]
params = dict(anchor=0, num_before=0, num_after=0, narrow=ujson.dumps(narrow))
result = self.client_get("/json/messages", params)
self.assert_json_error_contains(result,
"Invalid narrow operator: unknown operator")
def test_invalid_narrow_operand_in_dict(self) -> None:
self.login(self.example_email("hamlet"))
# str or int is required for sender, group-pm-with, stream
invalid_operands = [['1'], [2], None]
error_msg = 'elem["operand"] is not a string or integer'
for operand in ['sender', 'group-pm-with', 'stream']:
self.exercise_bad_narrow_operand_using_dict_api(operand, invalid_operands, error_msg)
# str or int list is required for pm-with operator
invalid_operands = [None]
error_msg = 'elem["operand"] is not a string or an integer list'
self.exercise_bad_narrow_operand_using_dict_api('pm-with', invalid_operands, error_msg)
invalid_operands = [['2']]
error_msg = 'elem["operand"][0] is not an integer'
self.exercise_bad_narrow_operand_using_dict_api('pm-with', invalid_operands, error_msg)
# For others only str is acceptable
invalid_operands = [2, None, [1]]
error_msg = 'elem["operand"] is not a string'
for operand in ['is', 'near', 'has', 'id']:
self.exercise_bad_narrow_operand_using_dict_api(operand, invalid_operands, error_msg)
# The exercise_bad_narrow_operand helper method uses legacy tuple format to
# test bad narrow, this method uses the current dict api format
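    # For example (illustrative operand), the same narrow can be written as
    #   legacy tuple format: [["stream", "Verona"]]
    #   current dict format: [{"operator": "stream", "operand": "Verona"}]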
def exercise_bad_narrow_operand_using_dict_api(self, operator: str,
operands: Sequence[Any],
error_msg: str) -> None:
for operand in operands:
narrow = [dict(operator=operator, operand=operand)]
params = dict(anchor=0, num_before=0, num_after=0, narrow=ujson.dumps(narrow))
result = self.client_get('/json/messages', params)
self.assert_json_error_contains(result, error_msg)
def exercise_bad_narrow_operand(self, operator: str,
operands: Sequence[Any],
error_msg: str) -> None:
other_params = [("anchor", 0), ("num_before", 0), ("num_after", 0)] # type: List[Tuple[str, Any]]
for operand in operands:
post_params = dict(other_params + [
("narrow", ujson.dumps([[operator, operand]]))])
result = self.client_get("/json/messages", post_params)
self.assert_json_error_contains(result, error_msg)
def test_bad_narrow_stream_content(self) -> None:
"""
If an invalid stream name is requested in get_messages, an error is
returned.
"""
self.login(self.example_email("hamlet"))
bad_stream_content = (0, [], ["x", "y"]) # type: Tuple[int, List[None], List[str]]
self.exercise_bad_narrow_operand("stream", bad_stream_content,
"Bad value for 'narrow'")
def test_bad_narrow_one_on_one_email_content(self) -> None:
"""
If an invalid 'pm-with' is requested in get_messages, an
error is returned.
"""
self.login(self.example_email("hamlet"))
bad_stream_content = (0, [], ["x", "y"]) # type: Tuple[int, List[None], List[str]]
self.exercise_bad_narrow_operand("pm-with", bad_stream_content,
"Bad value for 'narrow'")
def test_bad_narrow_nonexistent_stream(self) -> None:
self.login(self.example_email("hamlet"))
self.exercise_bad_narrow_operand("stream", ['non-existent stream'],
"Invalid narrow operator: unknown stream")
non_existing_stream_id = 1232891381239
self.exercise_bad_narrow_operand_using_dict_api('stream', [non_existing_stream_id],
'Invalid narrow operator: unknown stream')
def test_bad_narrow_nonexistent_email(self) -> None:
self.login(self.example_email("hamlet"))
self.exercise_bad_narrow_operand("pm-with", ['[email protected]'],
"Invalid narrow operator: unknown user")
def test_bad_narrow_pm_with_id_list(self) -> None:
self.login(self.example_email('hamlet'))
self.exercise_bad_narrow_operand('pm-with', [-24],
"Bad value for 'narrow': [[\"pm-with\",-24]]")
def test_message_without_rendered_content(self) -> None:
"""Older messages may not have rendered_content in the database"""
m = self.get_last_message()
m.rendered_content = m.rendered_content_version = None
m.content = 'test content'
d = MessageDict.wide_dict(m)
MessageDict.finalize_payload(d, apply_markdown=True, client_gravatar=False)
self.assertEqual(d['content'], '<p>test content</p>')
def common_check_get_messages_query(self, query_params: Dict[str, object], expected: str) -> None:
user_profile = self.example_user('hamlet')
request = POSTRequestMock(query_params, user_profile)
with queries_captured() as queries:
get_messages_backend(request, user_profile)
for query in queries:
if "/* get_messages */" in query['sql']:
sql = str(query['sql']).replace(" /* get_messages */", '')
self.assertEqual(sql, expected)
return
raise AssertionError("get_messages query not found")
def test_find_first_unread_anchor(self) -> None:
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
othello = self.example_user('othello')
self.make_stream('England')
# Send a few messages that Hamlet won't have UserMessage rows for.
unsub_message_id = self.send_stream_message(cordelia.email, 'England')
self.send_personal_message(cordelia.email, othello.email)
self.subscribe(hamlet, 'England')
muted_topics = [
['England', 'muted'],
]
set_topic_mutes(hamlet, muted_topics)
# send a muted message
muted_message_id = self.send_stream_message(cordelia.email, 'England', topic_name='muted')
# finally send Hamlet a "normal" message
first_message_id = self.send_stream_message(cordelia.email, 'England')
# send a few more messages
extra_message_id = self.send_stream_message(cordelia.email, 'England')
self.send_personal_message(cordelia.email, hamlet.email)
sa_conn = get_sqlalchemy_connection()
user_profile = hamlet
anchor = find_first_unread_anchor(
sa_conn=sa_conn,
user_profile=user_profile,
narrow=[],
)
self.assertEqual(anchor, first_message_id)
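        # In other words, find_first_unread_anchor effectively skips both the
        # message sent before Hamlet subscribed (no UserMessage row for him)
        # and the muted-topic message, so the first "normal" message is the
        # anchor it reports.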
# With the same data setup, we now want to test that a reasonable
# search still gets the first message sent to Hamlet (before he
# subscribed) and other recent messages to the stream.
query_params = dict(
anchor="first_unread",
num_before=10,
num_after=10,
narrow='[["stream", "England"]]'
)
request = POSTRequestMock(query_params, user_profile)
payload = get_messages_backend(request, user_profile)
result = ujson.loads(payload.content)
self.assertEqual(result['anchor'], first_message_id)
self.assertEqual(result['found_newest'], True)
self.assertEqual(result['found_oldest'], True)
messages = result['messages']
self.assertEqual(
{msg['id'] for msg in messages},
{unsub_message_id, muted_message_id, first_message_id, extra_message_id}
)
def test_use_first_unread_anchor_with_some_unread_messages(self) -> None:
user_profile = self.example_user('hamlet')
# Have Othello send messages to Hamlet that he hasn't read.
# Here, Hamlet isn't subscribed to the stream Scotland
self.send_stream_message(self.example_email("othello"), "Scotland")
first_unread_message_id = self.send_personal_message(
self.example_email("othello"),
self.example_email("hamlet"),
)
# Add a few messages that help us test that our query doesn't
# look at messages that are irrelevant to Hamlet.
self.send_personal_message(self.example_email("othello"), self.example_email("cordelia"))
self.send_personal_message(self.example_email("othello"), self.example_email("iago"))
query_params = dict(
anchor="first_unread",
num_before=10,
num_after=10,
narrow='[]'
)
request = POSTRequestMock(query_params, user_profile)
with queries_captured() as all_queries:
get_messages_backend(request, user_profile)
# Verify the query for old messages looks correct.
queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
self.assertEqual(len(queries), 1)
sql = queries[0]['sql']
self.assertNotIn('AND message_id = %s' % (LARGER_THAN_MAX_MESSAGE_ID,), sql)
self.assertIn('ORDER BY message_id ASC', sql)
cond = 'WHERE user_profile_id = %d AND message_id >= %d' % (
user_profile.id, first_unread_message_id,
)
self.assertIn(cond, sql)
cond = 'WHERE user_profile_id = %d AND message_id <= %d' % (
user_profile.id, first_unread_message_id - 1,
)
self.assertIn(cond, sql)
self.assertIn('UNION', sql)
def test_visible_messages_use_first_unread_anchor_with_some_unread_messages(self) -> None:
user_profile = self.example_user('hamlet')
# Have Othello send messages to Hamlet that he hasn't read.
self.subscribe(self.example_user("hamlet"), 'Scotland')
first_unread_message_id = self.send_stream_message(self.example_email("othello"), "Scotland")
self.send_stream_message(self.example_email("othello"), "Scotland")
self.send_stream_message(self.example_email("othello"), "Scotland")
self.send_personal_message(
self.example_email("othello"),
self.example_email("hamlet"),
)
# Add a few messages that help us test that our query doesn't
# look at messages that are irrelevant to Hamlet.
self.send_personal_message(self.example_email("othello"), self.example_email("cordelia"))
self.send_personal_message(self.example_email("othello"), self.example_email("iago"))
query_params = dict(
anchor="first_unread",
num_before=10,
num_after=10,
narrow='[]'
)
request = POSTRequestMock(query_params, user_profile)
first_visible_message_id = first_unread_message_id + 2
with first_visible_id_as(first_visible_message_id):
with queries_captured() as all_queries:
get_messages_backend(request, user_profile)
queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
self.assertEqual(len(queries), 1)
sql = queries[0]['sql']
self.assertNotIn('AND message_id = %s' % (LARGER_THAN_MAX_MESSAGE_ID,), sql)
self.assertIn('ORDER BY message_id ASC', sql)
cond = 'WHERE user_profile_id = %d AND message_id <= %d' % (
user_profile.id, first_unread_message_id - 1
)
self.assertIn(cond, sql)
cond = 'WHERE user_profile_id = %d AND message_id >= %d' % (
user_profile.id, first_visible_message_id
)
self.assertIn(cond, sql)
def test_use_first_unread_anchor_with_no_unread_messages(self) -> None:
user_profile = self.example_user('hamlet')
query_params = dict(
anchor="first_unread",
num_before=10,
num_after=10,
narrow='[]'
)
request = POSTRequestMock(query_params, user_profile)
with queries_captured() as all_queries:
get_messages_backend(request, user_profile)
queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
self.assertEqual(len(queries), 1)
sql = queries[0]['sql']
self.assertNotIn('AND message_id <=', sql)
self.assertNotIn('AND message_id >=', sql)
first_visible_message_id = 5
with first_visible_id_as(first_visible_message_id):
with queries_captured() as all_queries:
get_messages_backend(request, user_profile)
queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
sql = queries[0]['sql']
self.assertNotIn('AND message_id <=', sql)
self.assertNotIn('AND message_id >=', sql)
def test_use_first_unread_anchor_with_muted_topics(self) -> None:
"""
Test that our logic related to `use_first_unread_anchor`
invokes the `message_id = LARGER_THAN_MAX_MESSAGE_ID` hack for
the `/* get_messages */` query when relevant muting
is in effect.
        This is a very arcane test of some arcane, but very heavily
field-tested, logic in get_messages_backend(). If
this test breaks, be absolutely sure you know what you're
doing.
"""
realm = get_realm('zulip')
self.make_stream('web stuff')
self.make_stream('bogus')
user_profile = self.example_user('hamlet')
muted_topics = [
['Scotland', 'golf'],
['web stuff', 'css'],
['bogus', 'bogus']
]
set_topic_mutes(user_profile, muted_topics)
query_params = dict(
anchor="first_unread",
num_before=0,
num_after=0,
narrow='[["stream", "Scotland"]]'
)
request = POSTRequestMock(query_params, user_profile)
with queries_captured() as all_queries:
get_messages_backend(request, user_profile)
# Do some tests on the main query, to verify the muting logic
# runs on this code path.
queries = [q for q in all_queries if str(q['sql']).startswith("SELECT message_id, flags")]
self.assertEqual(len(queries), 1)
stream = get_stream('Scotland', realm)
recipient_id = get_stream_recipient(stream.id).id
cond = '''AND NOT (recipient_id = {scotland} AND upper(subject) = upper('golf'))'''.format(scotland=recipient_id)
self.assertIn(cond, queries[0]['sql'])
# Next, verify the use_first_unread_anchor setting invokes
# the `message_id = LARGER_THAN_MAX_MESSAGE_ID` hack.
queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
self.assertEqual(len(queries), 1)
self.assertIn('AND zerver_message.id = %d' % (LARGER_THAN_MAX_MESSAGE_ID,),
queries[0]['sql'])
def test_exclude_muting_conditions(self) -> None:
realm = get_realm('zulip')
self.make_stream('web stuff')
user_profile = self.example_user('hamlet')
self.make_stream('irrelevant_stream')
# Test the do-nothing case first.
muted_topics = [
['irrelevant_stream', 'irrelevant_topic']
]
set_topic_mutes(user_profile, muted_topics)
# If nothing relevant is muted, then exclude_muting_conditions()
# should return an empty list.
narrow = [
dict(operator='stream', operand='Scotland'),
]
muting_conditions = exclude_muting_conditions(user_profile, narrow)
self.assertEqual(muting_conditions, [])
# Also test that passing stream ID works
narrow = [
dict(operator='stream', operand=get_stream('Scotland', realm).id)
]
muting_conditions = exclude_muting_conditions(user_profile, narrow)
self.assertEqual(muting_conditions, [])
# Ok, now set up our muted topics to include a topic relevant to our narrow.
muted_topics = [
['Scotland', 'golf'],
['web stuff', 'css'],
]
set_topic_mutes(user_profile, muted_topics)
# And verify that our query will exclude them.
narrow = [
dict(operator='stream', operand='Scotland'),
]
muting_conditions = exclude_muting_conditions(user_profile, narrow)
query = select([column("id").label("message_id")], None, table("zerver_message"))
query = query.where(*muting_conditions)
expected_query = '''\
SELECT id AS message_id \n\
FROM zerver_message \n\
WHERE NOT (recipient_id = %(recipient_id_1)s AND upper(subject) = upper(%(param_1)s))\
'''
self.assertEqual(get_sqlalchemy_sql(query), expected_query)
params = get_sqlalchemy_query_params(query)
self.assertEqual(params['recipient_id_1'], get_recipient_id_for_stream_name(realm, 'Scotland'))
self.assertEqual(params['param_1'], 'golf')
mute_stream(realm, user_profile, 'Verona')
# Using a bogus stream name should be similar to using no narrow at
# all, and we'll exclude all mutes.
narrow = [
dict(operator='stream', operand='bogus-stream-name'),
]
muting_conditions = exclude_muting_conditions(user_profile, narrow)
query = select([column("id")], None, table("zerver_message"))
query = query.where(and_(*muting_conditions))
expected_query = '''\
SELECT id \n\
FROM zerver_message \n\
WHERE recipient_id NOT IN (%(recipient_id_1)s) \
AND NOT \
(recipient_id = %(recipient_id_2)s AND upper(subject) = upper(%(param_1)s) OR \
recipient_id = %(recipient_id_3)s AND upper(subject) = upper(%(param_2)s))\
'''
self.assertEqual(get_sqlalchemy_sql(query), expected_query)
params = get_sqlalchemy_query_params(query)
self.assertEqual(params['recipient_id_1'], get_recipient_id_for_stream_name(realm, 'Verona'))
self.assertEqual(params['recipient_id_2'], get_recipient_id_for_stream_name(realm, 'Scotland'))
self.assertEqual(params['param_1'], 'golf')
self.assertEqual(params['recipient_id_3'], get_recipient_id_for_stream_name(realm, 'web stuff'))
self.assertEqual(params['param_2'], 'css')
def test_get_messages_queries(self) -> None:
query_ids = self.get_query_ids()
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id = 0) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 0}, sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id = 0) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 1, 'num_after': 0}, sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} ORDER BY message_id ASC \n LIMIT 2) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 1}, sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} ORDER BY message_id ASC \n LIMIT 11) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10}, sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id <= 100 ORDER BY message_id DESC \n LIMIT 11) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 100, 'num_before': 10, 'num_after': 0}, sql)
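        # When both num_before and num_after are non-zero, the expected SQL
        # below splits into two half-scans around the anchor (one DESC for the
        # "before" rows, one ASC for the "after" rows) glued with UNION ALL.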
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM ((SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id <= 99 ORDER BY message_id DESC \n LIMIT 10) UNION ALL (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id >= 100 ORDER BY message_id ASC \n LIMIT 11)) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 100, 'num_before': 10, 'num_after': 10}, sql)
def test_get_messages_with_narrow_queries(self) -> None:
query_ids = self.get_query_ids()
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (sender_id = {othello_id} AND recipient_id = {hamlet_recipient} OR sender_id = {hamlet_id} AND recipient_id = {othello_recipient}) AND message_id = 0) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 0,
'narrow': '[["pm-with", "%s"]]' % (self.example_email("othello"),)},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (sender_id = {othello_id} AND recipient_id = {hamlet_recipient} OR sender_id = {hamlet_id} AND recipient_id = {othello_recipient}) AND message_id = 0) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 1, 'num_after': 0,
'narrow': '[["pm-with", "%s"]]' % (self.example_email("othello"),)},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (sender_id = {othello_id} AND recipient_id = {hamlet_recipient} OR sender_id = {hamlet_id} AND recipient_id = {othello_recipient}) ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["pm-with", "%s"]]' % (self.example_email("othello"),)},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (flags & 2) != 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["is", "starred"]]'},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND sender_id = {othello_id} ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["sender", "%s"]]' % (self.example_email("othello"),)},
sql)
sql_template = 'SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["stream", "Scotland"]]'},
sql)
sql_template = 'SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id IN ({public_streams_recipents}) ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["streams", "public"]]'},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND recipient_id NOT IN ({public_streams_recipents}) ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[{"operator":"streams", "operand":"public", "negated": true}]'},
sql)
sql_template = "SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND upper(subject) = upper('blah') ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["topic", "blah"]]'},
sql)
sql_template = "SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} AND upper(subject) = upper('blah') ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["stream", "Scotland"], ["topic", "blah"]]'},
sql)
# Narrow to pms with yourself
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND sender_id = {hamlet_id} AND recipient_id = {hamlet_recipient} ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["pm-with", "%s"]]' % (self.example_email("hamlet"),)},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND recipient_id = {scotland_recipient} AND (flags & 2) != 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["stream", "Scotland"], ["is", "starred"]]'},
sql)
@override_settings(USING_PGROONGA=False)
def test_get_messages_with_search_queries(self) -> None:
query_ids = self.get_query_ids()
sql_template = """\
SELECT anon_1.message_id, anon_1.flags, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.topic_matches \n\
FROM (SELECT message_id, flags, subject, rendered_content, array((SELECT ARRAY[sum(length(anon_3) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_3, '</ts-match>') - 1] AS anon_2 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', rendered_content, plainto_tsquery('zulip.english_us_search', 'jumping'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_3 \n\
LIMIT ALL OFFSET 1)) AS content_matches, array((SELECT ARRAY[sum(length(anon_5) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_5, '</ts-match>') - 1] AS anon_4 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', escape_html(subject), plainto_tsquery('zulip.english_us_search', 'jumping'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_5 \n\
LIMIT ALL OFFSET 1)) AS topic_matches \n\
FROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \n\
WHERE user_profile_id = {hamlet_id} AND (search_tsvector @@ plainto_tsquery('zulip.english_us_search', 'jumping')) ORDER BY message_id ASC \n\
LIMIT 10) AS anon_1 ORDER BY message_id ASC\
"""
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["search", "jumping"]]'},
sql)
sql_template = """\
SELECT anon_1.message_id, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.topic_matches \n\
FROM (SELECT id AS message_id, subject, rendered_content, array((SELECT ARRAY[sum(length(anon_3) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_3, '</ts-match>') - 1] AS anon_2 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', rendered_content, plainto_tsquery('zulip.english_us_search', 'jumping'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_3 \n\
LIMIT ALL OFFSET 1)) AS content_matches, array((SELECT ARRAY[sum(length(anon_5) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_5, '</ts-match>') - 1] AS anon_4 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', escape_html(subject), plainto_tsquery('zulip.english_us_search', 'jumping'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_5 \n\
LIMIT ALL OFFSET 1)) AS topic_matches \n\
FROM zerver_message \n\
WHERE recipient_id = {scotland_recipient} AND (search_tsvector @@ plainto_tsquery('zulip.english_us_search', 'jumping')) ORDER BY zerver_message.id ASC \n\
LIMIT 10) AS anon_1 ORDER BY message_id ASC\
"""
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["stream", "Scotland"], ["search", "jumping"]]'},
sql)
sql_template = """\
SELECT anon_1.message_id, anon_1.flags, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.topic_matches \n\
FROM (SELECT message_id, flags, subject, rendered_content, array((SELECT ARRAY[sum(length(anon_3) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_3, '</ts-match>') - 1] AS anon_2 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', rendered_content, plainto_tsquery('zulip.english_us_search', '"jumping" quickly'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_3 \n\
LIMIT ALL OFFSET 1)) AS content_matches, array((SELECT ARRAY[sum(length(anon_5) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_5, '</ts-match>') - 1] AS anon_4 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', escape_html(subject), plainto_tsquery('zulip.english_us_search', '"jumping" quickly'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_5 \n\
LIMIT ALL OFFSET 1)) AS topic_matches \n\
FROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \n\
WHERE user_profile_id = {hamlet_id} AND (content ILIKE '%jumping%' OR subject ILIKE '%jumping%') AND (search_tsvector @@ plainto_tsquery('zulip.english_us_search', '"jumping" quickly')) ORDER BY message_id ASC \n\
LIMIT 10) AS anon_1 ORDER BY message_id ASC\
"""
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
'narrow': '[["search", "\\"jumping\\" quickly"]]'},
sql)
@override_settings(USING_PGROONGA=False)
def test_get_messages_with_search_using_email(self) -> None:
self.login(self.example_email("cordelia"))
messages_to_search = [
('say hello', 'How are you doing, @**Othello, the Moor of Venice**?'),
('lunch plans', 'I am hungry!'),
]
next_message_id = self.get_last_message().id + 1
for topic, content in messages_to_search:
self.send_stream_message(
sender_email=self.example_email("cordelia"),
stream_name="Verona",
content=content,
topic_name=topic,
)
self._update_tsvector_index()
narrow = [
dict(operator='sender', operand=self.example_email("cordelia")),
dict(operator='search', operand=self.example_email("othello")),
]
result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_after=10,
)) # type: Dict[str, Any]
self.assertEqual(len(result['messages']), 0)
narrow = [
dict(operator='sender', operand=self.example_email("cordelia")),
dict(operator='search', operand='othello'),
]
result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_after=10,
))
self.assertEqual(len(result['messages']), 1)
messages = result['messages']
meeting_message = [m for m in messages if m[TOPIC_NAME] == 'say hello'][0]
self.assertEqual(
meeting_message[MATCH_TOPIC],
'say hello')
othello = self.example_user('othello')
self.assertEqual(
meeting_message['match_content'],
('<p>How are you doing, <span class="user-mention" data-user-id="%s">' +
'@<span class="highlight">Othello</span>, the Moor of Venice</span>?</p>') % (
othello.id))
| apache-2.0 | -3,895,741,321,599,924,000 | 45.736357 | 439 | 0.591406 | false |
pseudobeard/teambalancer | legacy/getter.py | 1 | 1244 | import json
import requests
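# properties.json is expected to provide the keys read below; illustrative
# shape only (these values are made up, not real credentials):
# {
#     "jwtToken": "<StreamElements JWT token>",
#     "id": "<StreamElements channel id>",
#     "item_name": "<store item representing a viewer-game ticket>"
# }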
with open('properties.json') as data_file:
data = json.load(data_file)
jwtToken = data["jwtToken"]
id = data["id"]
ITEM_NAME = data["item_name"]
headers = {"authorization" : "Bearer " + jwtToken}
baseurl = "https://api.streamelements.com/kappa/v1/store/"
end = "/redemptions?limit=100&pending=true"
class Getter:
def __init__(self):
return
def getJSON(self):
res = requests.get(baseurl + id + end, headers=headers)
data = json.loads(res.text)
return data
def getViewerGameParticipants(self):
battletags = []
        payload = self.getJSON()  # avoid shadowing the json module
        redemptions = payload.get("docs")
        for redemption in redemptions: # Iterate through redemptions
item = redemption.get("item") # Get item
if item is not None:
itemName = item.get("name")
if itemName == ITEM_NAME: # If it is a viewer ticket, add the battletag to the list
inputs = redemption.get("input")
battletags.append(inputs[0])
return battletags
if __name__=="__main__":
g = Getter()
battletags = g.getViewerGameParticipants()
for battletag in battletags:
print(battletag) | gpl-3.0 | -2,647,808,785,494,130,000 | 26.666667 | 99 | 0.605305 | false |
rmulton/lawen | webservice_caller/GoogleAPICaller.py | 1 | 2949 | import json
import re
from bs4 import BeautifulSoup
from model.Transport.Walk import Walk
from model.Transport.PublicTransport import PublicTransport
from model.Transport.Drive import Drive
from model.Transport.Bicycle import Bicycle
from model.Possibilities import Possibilities
from webservice_caller.TransportAPICaller import TransportAPICaller
from webservice_caller.call_url import call_url, APICallError
class GoogleAPICaller(TransportAPICaller):
'''
    Class that handles calling the Google Directions API to compute itineraries
'''
_url = 'https://maps.googleapis.com/maps/api/directions/json?'
_key = 'AIzaSyCqgwlzgUDYYF7xnePerJZaapgUWmyGYjc'
def __init__ (self, request):
'''
Create the different parameters that we will need for the API url
'''
self._origin = request.from_x, request.from_y
self._destination = request.to_x, request.to_y
self._modes = {'driving':Drive,'walking':Walk,'bicycling':Bicycle,'transit':PublicTransport}
@property
def modes(self):
return self._modes
def get_times(self):
'''
        Get the travel time for each travel mode and return a dict mapping
        each mode name to the travel duration reported by the API
'''
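        # Example of the request URL built below (illustrative coordinates;
        # the real key and coordinates are substituted at runtime):
        # https://maps.googleapis.com/maps/api/directions/json?origin=48.85,2.35&destination=48.86,2.33&mode=driving&key=<API key>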
times = {}
for mode, mode_class in self._modes.items():
url_final = GoogleAPICaller._url + "origin=" + ",".join(str (e) for e in self._origin) + "&destination=" + ",".join(str(f) for f in self._destination) + "&mode=" + mode + "&key=" + GoogleAPICaller._key
response = call_url(url_final)
data = json.loads(response.content)
try:
travel_time = data["routes"][0]["legs"][0]["duration"]["value"]
except IndexError:
raise APICallError
except KeyError:
raise APICallError
times[mode] = travel_time
return times
def get_itineraries(self):
'''
        Get the itinerary for each travel mode and return a dict mapping
        each mode name to a plain-text list of steps
'''
itineraries = {}
for mode, mode_class in self._modes.items():
url_final = GoogleAPICaller._url + "origin=" + ",".join(str (e) for e in self._origin) + "&destination=" + ",".join(str(f) for f in self._destination) + "&mode=" + mode + "&key=" + GoogleAPICaller._key
response = call_url(url_final)
data = json.loads(response.content)
try:
instruction = data["routes"][0]["legs"][0]["steps"]
except IndexError:
raise APICallError
except KeyError:
raise APICallError
itinerary = ""
for i in range(len(instruction)):
itinerary += instruction[i]["html_instructions"] + ", "
clean_itinerary = BeautifulSoup(itinerary,"html.parser").text
itineraries[mode] = clean_itinerary
return itineraries
| mit | 4,741,626,998,261,590,000 | 39.958333 | 213 | 0.60902 | false |
kevintee/Predicting-Gene-Networks | results/goatools-master/scripts/map_to_slim.py | 1 | 4362 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import os
import os.path as op
import sys
sys.path.insert(0, op.join(op.dirname(__file__), ".."))
from goatools.obo_parser import GODag
from goatools.mapslim import mapslim
# copied from find_enrichment.py
# TODO: put this function into the library, copying is BAD practice
def read_associations(assoc_fn):
assoc = {}
for row in open(assoc_fn):
atoms = row.split()
if len(atoms) == 2:
a, b = atoms
elif len(atoms) > 2 and row.count('\t') == 1:
a, b = row.split("\t")
else:
continue
b = set(b.split(";"))
assoc[a] = b
return assoc
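# Illustrative association file line accepted by read_associations() above
# (identifier and GO ids are made up):
#   YBR043C	GO:0006810;GO:0016192
# i.e. one product per line, whitespace- or tab-separated from a
# semicolon-joined list of GO term ids.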
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("%prog [options] go_obo_file goslim_obo_file")
p.add_option("--term", dest="term", help="a term (association id) to map "
"to slim IDs. This can not be used together with "
"--association_file", action="store", type="string",
default=None)
p.add_option("--association_file", dest="ass_file_name", action="store",
help="the file of protein products and their associations "
"to be mapped to GO slim terms. This can not be used "
"together with --term", type="string", default=None)
p.add_option("--slim_out", dest="slim_out", action="store", type="string",
default="direct", help="One of `direct` or `all`. Defines "
"whether the output should contain all slim terms (all "
"ancestors) or only direct slim terms (only direct "
"ancestors)")
opts, args = p.parse_args()
# check for correct number of arguments
if len(args) != 2:
p.print_help()
sys.exit(1)
obo_file = args[0]
assert os.path.exists(obo_file), "file %s not found!" % obo_file
slim_obo_file = args[1]
assert os.path.exists(slim_obo_file), "file %s not found!" % slim_obo_file
# check that either --term or --association_file is set
if (opts.term is None and opts.ass_file_name is None) \
or ((opts.term is not None) and (opts.ass_file_name is not None)):
p.print_help()
sys.exit(1)
# check that slim_out is either "direct" or "all" and set according flag
only_direct = None
if opts.slim_out == "direct":
only_direct = True
elif opts.slim_out == "all":
only_direct = False
else:
p.print_help()
sys.exit(1)
# load DAGs
go_dag = GODag(obo_file)
goslim_dag = GODag(slim_obo_file)
# in case a single term is given as input:
if opts.term:
if opts.term not in go_dag:
print(("term %s not found!" % opts.term), file=sys.stderr)
sys.exit(1)
direct_anc, all_anc = mapslim(opts.term, go_dag, goslim_dag)
# output either all or only direct slims, depending on user command
if only_direct:
slim_terms_str = ";".join(direct_anc)
else:
slim_terms_str = ";".join(all_anc)
print(slim_terms_str)
    # in case an association file is given as input
if opts.ass_file_name:
assert os.path.exists(opts.ass_file_name), ("file %s not found!"
% opts.ass_file_name)
assocs = read_associations(opts.ass_file_name)
for protein_product, go_terms in assocs.items():
all_direct_anc = set()
all_covered_anc = set()
all_all_anc = set()
for go_term in go_terms:
if go_term not in go_dag:
continue
direct_anc, all_anc = mapslim(go_term, go_dag, goslim_dag)
all_all_anc |= all_anc
# collect all covered ancestors, so the direct ancestors
# can be calculated afterwards
all_covered_anc |= (all_anc - direct_anc)
all_direct_anc = all_all_anc - all_covered_anc
# output either all or only direct, depending on user command
if only_direct:
slim_terms_str = ";".join(all_direct_anc)
else:
slim_terms_str = ";".join(all_all_anc)
print((protein_product + "\t" + slim_terms_str))
| mit | 2,198,930,277,739,661,600 | 36.282051 | 78 | 0.560064 | false |
polypmer/obligarcy | obligarcy/urls.py | 1 | 1546 | from django.conf.urls import url
from . import views
from django.conf.urls.static import static, settings
urlpatterns = [
# ex: /oblicarcy/
url(r'^$', views.index, name='index'),
url(r'^firehose/$', views.firehose, name='firehose'),
url(r'^profile/$', views.profile, name='profile'),
# ex: /obligarcy/user/5/
url(r'^user/([0-9]+)/$', views.show_prof, name='user'),
url(r'^follow/$', views.follow, name='follow'),
url(r'^update/$', views.update_profile, name='update'),
# ex: /obligarcy/user
#url(r'^user/$', views.profile, name='profile'),
# ex: /oblicarcy/submissions/5/
url(r'^submission/([0-9a-z]+)/$', views.show_sub, name='submission'),
url(r'^submit/([0-9a-z]+)/([0-9]+)/$', views.submit, name='submit'),
url(r'^upload/([0-9a-z]+)/([0-9]+)/$', views.submit_upload, name='upload'),
# url(r'^submit/([0-9a-z]+)/([0-9]+)/$', views.submit, name='submit'),
# ex: /oblicarcy/contracts/5/
url(r'^contract/([0-9a-z]+)/$', views.show_con, name='contract'),
url(r'^challenge/$', views.challenge, name='challenge'),
url(r'^sign/([0-9a-z]+)/$', views.sign_con, name='sign'),
url(r'^active/([0-9]+)/$', views.show_active, name='active'),
# ex: /oblicarcy/login/
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', views.user_logout, name='logout'),
url(r'^register/$', views.register, name='register'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| gpl-3.0 | -9,180,719,467,244,064,000 | 40.783784 | 130 | 0.609314 | false |
HybridF5/jacket | jacket/api/compute/openstack/compute/schemas/floating_ips.py | 1 | 1528 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jacket.api.compute.validation import parameter_types
add_floating_ip = {
'type': 'object',
'properties': {
'addFloatingIp': {
'type': 'object',
'properties': {
'address': parameter_types.ip_address,
'fixed_address': parameter_types.ip_address
},
'required': ['address'],
'additionalProperties': False
}
},
'required': ['addFloatingIp'],
'additionalProperties': False
}
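# Example request body accepted by the schema above (addresses are illustrative):
# {"addFloatingIp": {"address": "10.0.0.5", "fixed_address": "192.168.1.10"}}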
remove_floating_ip = {
'type': 'object',
'properties': {
'removeFloatingIp': {
'type': 'object',
'properties': {
'address': parameter_types.ip_address
},
'required': ['address'],
'additionalProperties': False
}
},
'required': ['removeFloatingIp'],
'additionalProperties': False
}
| apache-2.0 | -3,991,817,029,527,831,600 | 29.56 | 78 | 0.600785 | false |
xinl/lifepadbox | lp/time.py | 1 | 1825 | import datetime
class TZOffset(datetime.tzinfo):
def __init__(self, offset_string):
# validity of offset_string is already taken care of by Setting.put() so we just trust it here.
self.offset_string = offset_string
self._h = int(self.offset_string[1:3])
self._m = int(self.offset_string[3:5])
if self.offset_string[0] == "-":
self._h = - self._h
self._m = - self._m
def utcoffset(self, dt): return datetime.timedelta(hours = self._h, minutes = self._m)
def dst(self, dt): return datetime.timedelta(0)
def tzname(self, dt): return self.offset_string
#UTC = TZOffset("+0000")
def str2datetime(time_str, time_zone="+0000"):
""" Convert string (format: YYYY-MM-DD HH:MM:SS) into datetime object. """
    # For some unknown reason, datetime.strptime() refuses to work here.
ts = time_str.split(' ')
ts[0] = ts[0].split('-')
ts[1] = ts[1].split(':')
time_object = datetime.datetime(int(ts[0][0]), int(ts[0][1]), int(ts[0][2]), int(ts[1][0]), int(ts[1][1]), int(ts[1][2]), 000000, TZOffset(time_zone))
#time_object = datetime.datetime.strptime(time_string, '%Y-%m-%d %H:%M:%S')
#time_object.tzinfo = TZOffset(time_zone)
return time_object
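# Example: str2datetime("2012-03-04 05:06:07", "-0500") returns
# datetime.datetime(2012, 3, 4, 5, 6, 7, tzinfo=TZOffset("-0500")).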
def datetime2str(time_obj):
""" Convert datetime object to string (format: YYYY-MM-DD HH:MM:SS). """
#time_str = time_obj.strftime("%Y-%m-%d %H:%M:%S")
time_str = "-".join([str(time_obj.year), str(time_obj.month), str(time_obj.day)]) + " " + ":".join([str(time_obj.hour), str(time_obj.minute), str(time_obj.second)])
return time_str
def changetz(time_object, timezone_string):
if time_object.tzinfo == None:
time_object = time_object.replace(tzinfo=TZOffset("+0000"))
return time_object.astimezone(TZOffset(timezone_string))
| bsd-2-clause | 3,487,160,411,433,715,700 | 41.465116 | 168 | 0.61863 | false |
johnpeck/cgrlib | cgrlib/tools/cgr_imp.py | 1 | 26283 | #!/usr/bin/env python
# cgr_imp.py
#
# Impedance measurement with the cgr-101 USB oscilloscope
import time # For making pauses
import os # For basic file I/O
import ConfigParser # For reading and writing the configuration file
import sys # For sys.exit()
from math import sin # For generating sine waves
from math import pi
# from scipy.optimize import minimize # For calculating phase shift
# --------------------- Configure argument parsing --------------------
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-r", "--rcfile" , default="cgr-imp.cfg",
help="Runtime configuration file"
)
args = parser.parse_args()
#---------------- Done with configuring argument parsing --------------
#------------------------- Configure logging --------------------------
import logging
from colorlog import ColoredFormatter
# create logger
logger = logging.getLogger('root')
logger.setLevel(logging.DEBUG)
# create console handler (ch) and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create file handler and set level to debug
fh = logging.FileHandler('cgrimp.log',mode='a',encoding=None,delay=False)
fh.setLevel(logging.DEBUG)
color_formatter = ColoredFormatter(
'[ %(log_color)s%(levelname)-8s%(reset)s] %(message)s',
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
)
plain_formatter = logging.Formatter(
'%(asctime)s - %(name)s - [ %(levelname)s ] - %(message)s',
'%Y-%m-%d %H:%M:%S'
)
# Colored output goes to the console
ch.setFormatter(color_formatter)
logger.addHandler(ch)
# Plain output goes to the file
fh.setFormatter(plain_formatter)
logger.addHandler(fh)
# --------------- Done with logging configuration ---------------------
# Now that logging has been set up, bring in the utility functions.
# These will use the same logger as the root application.
from cgrlib import utils
# ------------------ Configure plotting with gnuplot ------------------
# For the Gnuplot module
from numpy import * # For gnuplot.py
import Gnuplot, Gnuplot.funcutils # For gnuplot.py
# Set the gnuplot executable
Gnuplot.GnuplotOpts.gnuplot_command = 'gnuplot'
# Use this option to turn off fifo if you get warnings like:
# line 0: warning: Skipping unreadable file "/tmp/tmpakexra.gnuplot/fifo"
Gnuplot.GnuplotOpts.prefer_fifo_data = 0
# Use temporary files instead of inline data
Gnuplot.GnuplotOpts.prefer_inline_data = 0
# Set the default terminal
Gnuplot.GnuplotOpts.default_term = 'x11'
# ------------------ Done with gnuplot configuration ------------------
cmdterm = '\r\n' # Terminates each command
# ------------- Configure runtime configuration file ------------------
from configobj import ConfigObj # For writing and reading config file
# load_config(configuration file name)
#
# Open the configuration file (if it exists) and return the
# configuration object. If the file doesn't exist, call the init
# function to create it.
#
# This function could probably go in the library, since there's
# nothing unique about it.
def load_config(configFileName):
try:
logger.info('Reading configuration file ' + configFileName)
config = ConfigObj(configFileName,file_error=True)
return config
except IOError:
logger.warning('Did not find configuration file ' +
configFileName)
config = init_config(configFileName)
return config
def init_config(configFileName):
""" Initialize the configuration file and return config object.
Arguments:
configFileName -- Configuration file name
"""
config = ConfigObj()
config.filename = configFileName
config.initial_comment = [
'Configuration file for cgr-imp',
' ']
config.comments = {}
config.inline_comments = {}
#------------------------ Connection section ----------------------
config['Connection'] = {}
config['Connection'].comments = {}
config.comments['Connection'] = [
' ',
'------------------ Connection configuration ------------------'
]
config['Connection']['port'] = '/dev/ttyUSB0'
config['Connection'].comments['port'] = [
' ',
'Manually set the connection port here. This will be overwritten',
'by the most recent successful connection. The software will try',
'to connect using the configuration port first, then it will move',
'on to automatically detected ports and some hardcoded values.'
]
#------------------------- Logging section ------------------------
config['Logging'] = {}
config['Logging'].comments = {}
config.comments['Logging'] = [
' ',
'------------------- Logging configuration --------------------'
]
config['Logging']['termlevel'] = 'debug'
config['Logging'].comments['termlevel'] = [
' ',
'Set the logging level for the terminal. Levels:',
'debug, info, warning, error, critical'
]
config['Logging']['filelevel'] = 'debug'
config['Logging'].comments['filelevel'] = [
' ',
'Set the logging level for the logfile. Levels:',
'debug, info, warning, error, critical'
]
#----------------------- Calibration section ----------------------
config['Calibration'] = {}
config['Calibration'].comments = {}
config.comments['Calibration'] = [
' ',
'----------------- Calibration configuration ------------------'
]
config['Calibration']['calfile'] = 'cgrcal.pkl'
config['Calibration'].comments['calfile'] = [
"The calibration file in Python's pickle format"
]
config['Calibration']['Rshort'] = 0
config['Calibration'].comments['Rshort'] = [
' ',
'Resistance measured with inputs A and B connected to the output (ohms)'
]
#------------------------ Input section ---------------------------
config['Inputs'] = {}
config['Inputs'].comments = {}
config.comments['Inputs'] = [
' ',
'------------------- Input configuration ----------------------'
]
config['Inputs']['gain'] = 1
config['Inputs'].comments['gain'] = [
'Input hardware gain. Remember to run cgr-cal with the correct',
'gain settings to calibrate slope and offset',
' ',
'Gain setting Maximum voltage (V)',
'--------------------------------------------------------------',
' 1 25 ',
' 10 2.5 '
]
#--------------------- Frequency sweep section --------------------
config['Sweep'] = {}
config['Sweep'].comments = {}
config.comments['Sweep'] = [
' ',
'-------------- Frequency sweep configuration -----------------'
]
config['Sweep']['start'] = 100
config['Sweep'].comments['start'] = [
'Starting frequency (Hz)'
]
config['Sweep']['stop'] = 1000
config['Sweep'].comments['stop'] = [
'Last frequency in the sweep (Hz)'
]
config['Sweep']['points'] = 1
config['Sweep'].comments['points'] = [
'Number of points in the sweep'
]
config['Sweep']['cycles'] = 10
config['Sweep'].comments['cycles'] = [
'Number of sine wave cycles to acquire for each frequency step'
]
config['Sweep']['averages'] = 1
config['Sweep'].comments['averages'] = [
'Number of acquisitions to average at each frequency step'
]
config['Sweep']['amplitude'] = 1
config['Sweep'].comments['amplitude'] = [
'Amplitude of the driving frequency (Volts peak)'
]
#------------------ Impedance calculation section -----------------
config['Impedance'] = {}
config['Impedance'].comments = {}
config.comments['Impedance'] = [
' ',
'------------- Impedance calculation configuration ------------'
]
config['Impedance']['resistor'] = 1
config['Impedance'].comments['resistor'] = [
'Reference resistor -- current is voltage divided by this value'
]
# Writing our configuration file
logger.debug('Initializing configuration file ' +
configFileName)
config.write()
return config
# ---------- Done with configuring runtime configuration --------------
def init_logger(config,conhandler,filehandler):
""" Returns the configured console and file logging handlers
Arguments:
config -- The configuration file object
conhandler -- The console logging handler
filehandler -- The file logging handler
"""
if config['Logging']['termlevel'] == 'debug':
conhandler.setLevel(logging.DEBUG)
elif config['Logging']['termlevel'] == 'info':
conhandler.setLevel(logging.INFO)
elif config['Logging']['termlevel'] == 'warning':
conhandler.setLevel(logging.WARNING)
return (conhandler,filehandler)
def get_sweep_list(config):
""" Returns the frequencies in the sweep
Arguments:
config -- The configuration file object
"""
freqlist = []
points = int(config['Sweep']['points'])
startfreq = float(config['Sweep']['start'])
stopfreq = float(config['Sweep']['stop'])
freqlist = logspace(log10(startfreq),log10(stopfreq),points,True)
return freqlist
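# Example: start=100, stop=1000, points=5 gives the logarithmically spaced
# sweep [100, 177.8, 316.2, 562.3, 1000] Hz (approximately).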
def set_sample_rate(handle, config, drive_frequency, trigger_dictionary):
"""Returns the sample rate set to acquire multiple periods of the drive frequency
We need to send the trigger dictionary along with the drive
frequency because the two settings share the same register.
Arguments:
handle -- Serial object for the CGR scope
config -- The configuration file object
drive_frequency -- The drive frequency (Hz)
trigger_dictionary -- Trigger settings
"""
capture_points = 1024 # Points acquired after a trigger
seconds_needed = int(config['Sweep']['cycles'])/drive_frequency
target_rate = capture_points/seconds_needed
[control_register_value, actual_samplerate] = utils.set_ctrl_reg(
handle, target_rate, trigger_dictionary
)
return actual_samplerate
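# Example: with cycles=10 and a 100 Hz drive, 0.1 s of signal is needed, so the
# target rate is 1024 points / 0.1 s = 10240 Sa/s; set_ctrl_reg() then picks
# the nearest rate the hardware supports and returns it.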
def get_volts_rms(voltdata):
"""Returns the calculated Vrms for both channels
Arguments:
voltdata -- 1024 x 2 list of voltage samples
"""
offsets = []
offsets.append(mean(voltdata[0]))
offsets.append(mean(voltdata[1]))
sum = [0,0]
for point in range(len(voltdata[0])):
sum[0] += (voltdata[0][point] - offsets[0])**2
sum[1] += (voltdata[1][point] - offsets[1])**2
vrms = [sqrt(sum[0]/1024),sqrt(sum[1]/1024)]
return(vrms[0],vrms[1])
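# Note: the per-channel mean is subtracted before squaring, so the values
# returned above are the RMS of the AC component only.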
def get_sine_vectors(frequency,timedata,voltdata):
"""Returns the amplitudes for both channels using a homodyne technique
Amplitude values are peak volts (Vp)
Arguments:
frequency -- the frequency to lock in on
timedata -- List of sample times
voltdata -- 1024 x 2 list of voltage samples
"""
offsets = []
offsets.append(mean(voltdata[0]))
offsets.append(mean(voltdata[1]))
refsin = []
refcos = []
for time in timedata:
refsin.append(sin(2*pi*frequency*time))
refcos.append(cos(2*pi*frequency*time))
sineprod = []
cosprod = []
vectors = [] # [real part, imaginary part]
for channelnum in range(2):
sineprod.append(multiply(voltdata[channelnum]-offsets[channelnum],refsin))
cosprod.append(multiply(voltdata[channelnum]-offsets[channelnum],refcos))
vectors.append([mean(sineprod[channelnum]),mean(cosprod[channelnum])])
inphase_amplitudes = [mean(sineprod[0]), mean(sineprod[1])]
quadrature_amplitudes = [mean(cosprod[0]), mean(cosprod[1])]
amplitudes = []
phases = []
for channelnum in range(2):
amplitudes.append(2*sqrt(inphase_amplitudes[channelnum]**2 +
quadrature_amplitudes[channelnum]**2)
)
# Use arctan2 to allow angle to run from 0 --> 2pi
phases.append(arctan2(quadrature_amplitudes[channelnum],
inphase_amplitudes[channelnum])
)
# return [amplitudes, phases]
return vectors
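# Homodyne detail: for v(t) = A*sin(2*pi*f*t + phi) + offset, averaging
# (v - offset)*sin(2*pi*f*t) gives (A/2)*cos(phi) and averaging
# (v - offset)*cos(2*pi*f*t) gives (A/2)*sin(phi) over whole cycles, so each
# returned vector is [A/2*cos(phi), A/2*sin(phi)].  That is why callers use
# 2*vector_length() for amplitude and vector_angle() for phase.  The result is
# approximate when the capture does not span an integer number of cycles.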
def vector_length(vector):
"""Returns the length of the input vector
Arguments:
vector -- [real part, imaginary part] two-member list
"""
length = sqrt(vector[0]**2 + vector[1]**2)
return length
def vector_angle(vector):
"""Returns the angle of the input vector in radians
Arguments:
vector -- [real part, imaginary part] two-member list
"""
angle = arctan2(vector[1],vector[0])
return angle
def get_z_vector(config, frequency, timedata, voltdata):
"""Returns the magnitude and phase of the measured impedance
Arguments:
config -- The configuration file object
frequency -- The frequency to lock in on
timedata -- List of sample times
voltdata -- 1024 x 2 list of voltage samples
"""
resistor = float(config['Impedance']['resistor'])
vectors = get_sine_vectors(frequency, timedata, voltdata)
ratio_mag = vector_length(vectors[0])/vector_length(vectors[1])
ratio_phi = vector_angle(vectors[0]) - vector_angle(vectors[1])
ratio_real = ratio_mag * cos(ratio_phi)
ratio_imag = ratio_mag * sin(ratio_phi)
impedance_uncal = [resistor * (ratio_real - 1),resistor * (ratio_imag)]
impedance = [impedance_uncal[0] - float(config['Calibration']['Rshort']),
impedance_uncal[1]]
return impedance
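# As implemented above: Z = Rref*(Va/Vb - 1), evaluated as a complex ratio of
# the channel A and channel B homodyne vectors, with the calibrated
# short-circuit resistance (Calibration/Rshort) subtracted from the real part.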
def get_input_means(handle, gainlist, caldict):
"""Returns the mean voltages [chA mean, chB mean]
Arguments:
handle -- Serial object for the CGR-101
gainlist -- Gain configuration
caldict -- A dictionary of (calibration factor names) : values
"""
offsets = []
trigdict = utils.get_trig_dict(3,0,0,512)
[ctrl_reg, fsamp_act] = utils.set_ctrl_reg(handle, 1e5, trigdict)
tracedata = utils.get_uncal_forced_data(handle,ctrl_reg)
voltdata = utils.get_cal_data(caldict,gainlist,tracedata)
offsets.append(mean(voltdata[0]))
offsets.append(mean(voltdata[1]))
return(offsets)
def wave_plot_init():
"""Returns the configured gnuplot plot object for raw waveforms.
"""
# Set debug=1 to see gnuplot commands during execution.
plotobj = Gnuplot.Gnuplot(debug=0)
plotobj('set terminal x11') # Send a gnuplot command
plotobj('set style data lines')
plotobj('set key bottom left')
plotobj.xlabel('Time (s)')
plotobj.ylabel('Voltage (V)')
plotobj("set autoscale y")
plotobj("set format x '%0.0s %c'")
plotobj('set pointsize 1')
return plotobj
def magnitude_plot_init():
"""Returns the configured gnuplot plot object for the impedance
magnitude.
"""
# Set debug=1 to see gnuplot commands during execution.
plotobj = Gnuplot.Gnuplot(debug=0)
plotobj('set terminal x11') # Send a gnuplot command
plotobj('set style data lines')
plotobj('set key bottom left')
plotobj.xlabel('Frequency (Hz)')
plotobj.ylabel('|Z| (Ohms)')
plotobj("set autoscale y")
plotobj('set logscale x')
plotobj("set format x '%0.0s %c'")
plotobj('set pointsize 1')
return plotobj
def real_plot_init():
"""Returns the configured gnuplot plot object for Real(impedance)
"""
# Set debug=1 to see gnuplot commands during execution.
plotobj = Gnuplot.Gnuplot(debug=0)
plotobj('set terminal x11') # Send a gnuplot command
plotobj('set style data lines')
plotobj('set key bottom left')
plotobj.xlabel('Frequency (Hz)')
plotobj.ylabel('Real(Z) (Ohms)')
plotobj("set autoscale y")
plotobj("set format x '%0.0s %c'")
plotobj('set pointsize 1')
return plotobj
def capacitance_plot_init():
"""Returns the configured gnuplot plot object for capacitance
"""
# Set debug=1 to see gnuplot commands during execution.
plotobj = Gnuplot.Gnuplot(debug=0)
plotobj('set terminal x11') # Send a gnuplot command
plotobj('set style data lines')
plotobj('set key bottom left')
plotobj.xlabel('Frequency (Hz)')
plotobj.ylabel('Capacitance (F)')
plotobj("set autoscale y")
plotobj("set format x '%0.0s %c'")
plotobj("set format y '%0.1s %c'")
plotobj('set pointsize 1')
return plotobj
def plot_wave_data(plotobj, timedata, voltdata, trigdict, frequency, sine_vectors):
"""Plot data from both channels along with the fit result.
Arguments:
plotobj -- The gnuplot plot object
timedata -- List of sample times
voltdata -- 1024 x 2 list of voltage samples
trigdict -- Trigger parameter dictionary
frequency -- The frequency of the synthesized fit
sine_vectors -- List of [real part, imaginary part] vectors
"""
fitdata = [[],[]]
for time in timedata:
for channelnum in range(2):
fitdata[channelnum].append(
2 * vector_length(sine_vectors[channelnum]) *
sin(2*pi*frequency*time + vector_angle(sine_vectors[channelnum])) +
mean(voltdata[channelnum])
)
plotitem_cha_raw = Gnuplot.PlotItems.Data(
timedata,voltdata[0],title='Channel A raw')
plotitem_chb_raw = Gnuplot.PlotItems.Data(
timedata,voltdata[1],title='Channel B raw')
plotitem_cha_recovered = Gnuplot.PlotItems.Data(
timedata,fitdata[0],title='Channel A recovered')
plotitem_chb_recovered = Gnuplot.PlotItems.Data(
timedata,fitdata[1],title='Channel B recovered')
plotobj.plot(plotitem_cha_raw,plotitem_chb_raw,
plotitem_cha_recovered, plotitem_chb_recovered)
# Freeze the axis limits after the initial autoscale.
plotobj('unset autoscale y')
plotobj('set yrange [GPVAL_Y_MIN:GPVAL_Y_MAX]')
# Add the trigger crosshair
if (trigdict['trigsrc'] < 3):
trigtime = timedata[1024-trigdict['trigpts']]
plotobj('set arrow from ' + str(trigtime) + ',graph 0 to ' +
str(trigtime) + ',graph 1 nohead linetype 0')
plotobj('set arrow from graph 0,first ' + str(trigdict['triglev']) +
' to graph 1,first ' + str(trigdict['triglev']) +
' nohead linetype 0')
plotobj('replot')
savefilename = ('trig.eps')
plotobj('set terminal postscript eps color')
plotobj("set output '" + savefilename + "'")
plotobj('replot')
plotobj('set terminal x11')
def plot_magnitude_data(plotobj, frequencies, impedances):
"""Plot impedance magnitude data.
Arguments:
plotobj -- The gnuplot plot object
frequencies -- List of drive frequencies
impedances -- List of [real, imaginary] impedances at the drive frequencies
"""
magnitudes = []
for z in impedances:
magnitudes.append(vector_length(z))
plotitem_zmag = Gnuplot.PlotItems.Data(
frequencies,magnitudes,title='Impedance magnitude')
plotobj.plot(plotitem_zmag)
savefilename = ('zmag.eps')
plotobj('set terminal postscript eps color')
plotobj("set output '" + savefilename + "'")
plotobj('replot')
plotobj('set terminal x11')
def plot_real_data(plotobj, frequencies, impedances):
"""Plot Real(Z) data
Arguments:
plotobj -- The gnuplot plot object
frequencies -- List of drive frequencies
impedances -- List of [real, imaginary] impedances at the drive frequencies
"""
resistances = []
for z in impedances:
resistances.append(z[0])
plotitem_zreal = Gnuplot.PlotItems.Data(
frequencies,resistances,title='Resistance')
plotobj.plot(plotitem_zreal)
savefilename = ('zreal.eps')
plotobj('set terminal postscript eps color')
plotobj("set output '" + savefilename + "'")
plotobj('replot')
plotobj('set terminal x11')
def plot_capacitance_data(plotobj, frequencies, impedances):
"""Plot capacitances calculated from impedances
Arguments:
plotobj -- The gnuplot plot object
frequencies -- List of drive frequencies
impedances -- List of [real, imaginary] impedances at the drive frequencies
"""
capacitances = []
for frequency, impedance in zip(frequencies, impedances):
capacitances.append(-1/(2 * pi * frequency * impedance[1]))
plotitem_zcap = Gnuplot.PlotItems.Data(
frequencies, capacitances, title='Capacitance')
plotobj.plot(plotitem_zcap)
savefilename = ('zcap.eps')
plotobj('set terminal postscript eps color')
plotobj("set output '" + savefilename + "'")
plotobj('replot')
plotobj('set terminal x11')
# ------------------------- Main procedure ----------------------------
def main():
logger.debug('Utility module number is ' + str(utils.utilnum))
config = load_config(args.rcfile)
global ch,fh # Need to modify console and file logger handlers
# with the config file, from inside main(). They
# thus must be made global.
(ch,fh) = init_logger(config,ch,fh)
cgr = utils.get_cgr(config)
caldict = utils.load_cal(cgr, config['Calibration']['calfile'])
eeprom_list = utils.get_eeprom_offlist(cgr)
# Configure the inputs for 10x gain
if (int(config['Inputs']['gain']) == 10):
gainlist = utils.set_hw_gain(cgr,[1,1])
else:
gainlist = utils.set_hw_gain(cgr,[0,0])
meanvolts = get_input_means(cgr, gainlist, caldict)
logger.debug('Channel A mean is ' + '{:0.3f}'.format(meanvolts[0]) + ' V')
logger.debug('Channel B mean is ' + '{:0.3f}'.format(meanvolts[1]) + ' V')
# Configure the trigger:
# Trigger on channel A
# Trigger at channel A's mean voltage
# Trigger on the rising edge
# Capture 512 points after trigger
trigdict = utils.get_trig_dict(0,
meanvolts[0],
0,
512
)
utils.set_trig_level(cgr, caldict, gainlist, trigdict)
utils.set_trig_samples(cgr,trigdict)
waveplot = wave_plot_init()
magplot = magnitude_plot_init()
realplot = real_plot_init()
capplot = capacitance_plot_init()
freqlist = get_sweep_list(config)
drive_frequency_list = []
impedance_list = []
for progfreq in freqlist:
# The actual frequency will be determined by the hardware
actfreq = utils.set_sine_frequency(cgr, float(progfreq))
drive_frequency_list.append(actfreq)
logger.debug('Requested ' + '{:0.2f}'.format(float(progfreq)) +
' Hz, set ' + '{:0.2f}'.format(actfreq) + ' Hz')
if (progfreq == freqlist[0]):
# Only set amplitude once
actamp = utils.set_output_amplitude(cgr, float(config['Sweep']['amplitude']))
logger.debug('Requested ' + '{:0.2f}'.format(float(config['Sweep']['amplitude'])) +
' Vp, set ' + '{:0.2f}'.format(actamp) + ' Vp')
actrate = set_sample_rate(cgr, config, actfreq, trigdict)
logger.debug('Sample rate set to ' + '{:0.2f}'.format(actrate) +
' Hz, for an acquisition time of ' + '{:0.2f}'.format(1024/actrate * 1000) +
' milliseconds'
)
for capturenum in range(int(config['Sweep']['averages'])):
            if trigdict['trigsrc'] == 3:
                # Internal trigger: recompute the control register for the
                # current rate so a forced acquisition can be requested
                [ctrl_reg, fsamp_act] = utils.set_ctrl_reg(cgr, actrate, trigdict)
                tracedata = utils.get_uncal_forced_data(cgr, ctrl_reg)
elif trigdict['trigsrc'] < 3:
# Trigger on a voltage present at some input
tracedata = utils.get_uncal_triggered_data(cgr,trigdict)
logger.info('Acquiring trace ' + str(capturenum + 1) + ' of ' +
str(int(config['Sweep']['averages']))
)
if capturenum == 0:
sumdata = tracedata
else:
sumdata = add(sumdata,tracedata)
avgdata = divide(sumdata,float(capturenum +1))
# Apply calibration
voltdata = utils.get_cal_data(caldict,gainlist,avgdata)
if (int(config['Inputs']['gain']) == 10):
# Divide by 10 for 10x hardware gain with no probe
voltdata = divide(voltdata,10)
timedata = utils.get_timelist(actrate)
sine_vectors = get_sine_vectors(actfreq, timedata, voltdata)
logger.debug('Channel A amplitude is ' +
'{:0.3f}'.format(2*vector_length(sine_vectors[0])) +
' Vp'
)
logger.debug('Channel B amplitude is ' +
'{:0.3f}'.format(2*vector_length(sine_vectors[1])) +
' Vp'
)
logger.debug('Channel A phase shift is ' +
'{:0.3f}'.format(vector_angle(sine_vectors[0]) * 180/pi) +
' degrees'
)
logger.debug('Channel B phase shift is ' +
'{:0.3f}'.format(vector_angle(sine_vectors[1]) * 180/pi) +
' degrees'
)
plot_wave_data(waveplot, timedata, voltdata, trigdict, actfreq, sine_vectors)
impedance = get_z_vector(config, actfreq, timedata, voltdata)
logger.debug('Impedance magnitude is ' +
'{:0.3f}'.format(vector_length(impedance)) +
' Ohms'
)
logger.debug('Impedance angle is ' +
'{:0.3f}'.format(vector_angle(impedance) * 180/pi) +
' degrees'
)
impedance_list.append(impedance)
if (len(drive_frequency_list) > 1):
plot_magnitude_data(magplot, drive_frequency_list, impedance_list)
plot_real_data(realplot, drive_frequency_list, impedance_list)
plot_capacitance_data(capplot, drive_frequency_list, impedance_list)
# Set amplitude to zero to end the sweep
utils.set_output_amplitude(cgr, 0.01)
raw_input('Press any key to close plot and exit...')
# Execute main() from command line
if __name__ == '__main__':
main()
| mit | -1,559,406,550,913,330,200 | 35.862553 | 97 | 0.603051 | false |
tuwmft/MatterBot | mattermost_bot/plugins/mlpl/Game.py | 1 | 9936 | from threading import Timer
import random
import string
class Game():
IDDLE = 0
START_GAME = 1
START_ROUND = 2
DURING_ROUND = 3
END_ROUND = 4
POINTS_PER_SCORE = [
1,
2,
3,
5,
7,
10,
14,
20,
]
MLPL_BONUS = 10
MESSAGES = {
'start_round' : [
'ok faut troué le mot avec sa les gars : {}',
'voila les lettr qu''on vuet : {}',
'c sa les lettre maietenant : {}',
'on trouve le mot ki contient sa : {}',
],
'end_round' : [
'ct sa le mot qu''orn voulez : {}',
'le mot gaggnant : {}',
'le mot queest le meileur : {}',
'c sa qui gagen : {}',
],
'prepare_next_round' : [
'allé on se prepar',
'sa va continué, on est pret la',
'oké la suite mentienant',
'bon sa continu',
],
'best_proposition' : [
'POUUUUAH allé {} il trovue le max de {} letre',
'ALLLEZZZ {} il met les {} leterr !!!',
'WOOOOOUH {} il a trouver les {} lettre',
'JAVOUUUU bien jour {} il a fait {} letre !!',
],
'good_proposition' : [
'c pa mal sa {}. {} lettres',
'jaim bien ta porpositon {}. Sa fai {} lettres',
'alé bien ouej {} !! sa fé {} lettere cousin',
'pouuaaah commen il déchire {}, il a trouver {} letre',
],
'better_proposition' : [
'{} il nik {} lol. {} letre maintenat',
'{} passe devan {} avek {} letre',
'ouuuuuuuhhhhaaa alé {} a niker {} avec {} leterte',
],
'better_auto_proposition' : [
'{} se bat luimeme et fait {} letre !',
],
'the_winner_is' : [
'c {} qui a gagner, ac {} letr. {} point maggle',
'{} et le ganian. {} retre. Bi1 jouer couz. {} en plus',
'{} a fé {} letre et a gagner ce roundd. {} ppin en plu pour toi',
],
'no_winner' : [
'person a trouver lol',
'pa de gagnant. vous ete nul ou koi',
],
'i_did_not_really_understand_sorry' : [
'kwa',
'hein',
'kétuti',
'g pa compri',
'koi',
],
}
DICO = [
'a',
'b',
'et',
'chibre',
'fesse',
'caca',
'acac',
]
def __init__(self, id, send_message):
self.id = id
self.send_message = send_message
        self.status = Game.IDDLE  # idle until start() is called, so listen() has a status to check
        self.timers = []
self.current_letters = []
self.scores = {}
self.load_dico()
def listen(self, message):
if self.status == Game.DURING_ROUND:
self.handle_proposition(message)
def start(self):
self.send_message("c parti pour le jeu dans {}".format(self.id))
self.status = Game.START_GAME
self.delayed_action(3, self.start_round)
self.load_scores()
def stop(self):
self.status = Game.IDDLE
self.send_message("a ok c torminé alors")
self.clear_timers()
self.save_scores()
self.show_scores()
def start_round(self):
letters = []
for x in range(8):
letters.append(string.ascii_lowercase[random.randint(0, 25)])
message = self.get_random_message('start_round')
self.send_message(message.format(', '.join(letters).upper()))
self.current_letters = letters
self.current_best_words = self.find_best_words(letters)
self.current_best_proposition = ()
self.status = Game.DURING_ROUND
self.delayed_action(30, self.end_round)
def end_round(self, mlpl=False):
message = self.get_random_message('end_round')
best_words = self.find_best_words(self.current_letters)
if best_words:
self.send_message(message.format('`' + '` ou `'.join(best_words).upper() + '`'))
else:
self.send_message(message.format(' RIEN DUTOUT lol. CT pas facile la javou'))
if self.current_best_proposition:
winner = self.current_best_proposition[0]
score = self.current_best_proposition[1]
message = self.get_random_message('the_winner_is')
points = self.award_player(winner, score, mlpl)
self.send_message(message.format(winner, score, points))
else:
message = self.get_random_message('no_winner')
self.send_message(message)
self.status = Game.END_ROUND
self.delayed_action(3, self.prepare_next_round)
self.save_scores()
def load_scores(self):
try:
f = open('./mattermost_bot/plugins/mlpl/scores', 'r+')
for line in f:
line_data = line.split(':')
self.scores[line_data[0]] = int(line_data[1])
f.close()
print('scores sarzés')
except IOError as e:
print('err : impossible de charger les scores')
print(str(e))
def save_scores(self):
try:
f = open('./mattermost_bot/plugins/mlpl/scores', 'w+')
for name, score in self.scores.items():
f.write('{}:{}:\n'.format(name, score))
f.close()
print('scores enrizistrés')
except IOError:
print('err : impossible de sauvegarder les scores')
def award_player(self, name, score, mlpl=False):
points = Game.POINTS_PER_SCORE[score - 1]
if mlpl:
points += Game.MLPL_BONUS
if name in self.scores:
self.scores[name] += points
else:
self.scores[name] = points
return points
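    # Example: a winning 5-letter word scores POINTS_PER_SCORE[4] = 7 points,
    # or 7 + MLPL_BONUS = 17 points when it matched the best possible word.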
def prepare_next_round(self):
message = self.get_random_message('prepare_next_round')
self.send_message(message)
self.status = Game.START_ROUND
self.current_best_proposition = ()
self.delayed_action(2, self.start_round)
def delayed_action(self, delay, action, args=[]):
timer = Timer(delay, action, args)
timer.start()
self.timers.append(timer)
def handle_proposition(self, message):
proposition = message.get_message()
sender = message.get_username()
if not self.word_exists(proposition):
return
if not self.is_word_made_of_current_letters(proposition):
return
score = len(proposition)
if self.current_best_words:
best_len = len(self.current_best_words[0])
if score == best_len:
message = self.get_random_message('best_proposition')
self.send_message(message.format(sender, score))
self.current_best_proposition = (sender, score)
self.clear_timers()
                self.end_round(mlpl=True)
                return
if not self.current_best_proposition:
message = self.get_random_message('good_proposition')
self.send_message(message.format(sender, score))
self.current_best_proposition = (sender, score)
else:
last_score = self.current_best_proposition[1]
last_sender = self.current_best_proposition[0]
if last_score < score:
if last_sender == sender:
message = self.get_random_message('better_auto_proposition')
self.send_message(message.format(sender, score))
else:
message = self.get_random_message('better_proposition')
self.send_message(message.format(sender, last_sender, score))
self.current_best_proposition = (sender, score)
def show_scores(self):
if not self.scores:
self.send_message('pa de score encor')
return
self.send_message(
'les scores : \n{}'.format(
'\n'.join(' {} : {}'.format(n, s) for n, s in self.scores.items())
)
)
def load_dico(self):
self.dico = []
try:
f = open('./mattermost_bot/plugins/mlpl/dico.txt', 'r')
for line in f:
cleaned_line = line.replace('\n', '').replace('\r', '').lower()
self.dico.append(cleaned_line)
f.close()
except IOError:
print('err : dico pas chargeaaaable')
print('dico sarzé')
def get_dico(self):
return self.dico
def word_exists(self, word):
return word.lower() in self.get_dico()
def find_best_words(self, letters):
best_words = []
for word in self.get_dico():
word_ok = self.is_word_made_of_letters(word, letters)
if word_ok:
word_len = len(word)
if best_words:
best_word_len = len(best_words[0])
if word_len == best_word_len:
best_words.append(word)
if word_len > best_word_len:
best_words = [word]
else:
best_words = [word]
return best_words
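    # Note: this is a brute-force scan of the whole dictionary, keeping every
    # word of maximal length that can be built from the drawn letters (letter
    # multiplicity is enforced by is_word_made_of_letters below).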
def get_random_message(self, message_category):
messages = Game.MESSAGES[message_category]
return messages[random.randint(0, len(messages) - 1)]
def clear_timers(self):
for timer in self.timers:
timer.cancel()
def is_word_made_of_letters(self, proposition, letters):
word_ok = True
check_letters = letters[:]
for letter in proposition.lower():
if letter not in check_letters:
word_ok = False
break
check_letters.remove(letter)
return word_ok
def is_word_made_of_current_letters(self, proposition):
return self.is_word_made_of_letters(proposition, self.current_letters)
| mit | 1,998,505,489,977,923,300 | 28.88253 | 92 | 0.519605 | false |
aveao/AveBot | cogs/stockstream.py | 1 | 1897 | import discord
from discord.ext import commands
import secrets
class Stockstream:
def __init__(self, bot):
self.bot = bot
@commands.command()
async def copypasta(self, ctx, ticker: str):
"""Generates a copypasta for StockStream using the given ticker."""
copypasta_list = ["Kreygasm MUST Kreygasm BUY Kreygasm {} Kreygasm THIS Kreygasm ROUND Kreygasm",
"FutureMan BUY FutureMan {} FutureMan FOR FutureMan A FutureMan BRIGHTER FutureMan FUTURE FutureMan",
"Clappy Lemme buy a {0} before I send you a {0} Clappy",
"GivePLZ TRAIN TO PROFIT TOWN TakeNRG BUY {}! GivePLZ BUY {} TakeNRG",
"PogChamp {} PogChamp IS PogChamp OUR PogChamp LAST PogChamp HOPE PogChamp"]
to_post = f"Copypasta ready: `{secrets.choice(copypasta_list).format(ticker.upper())}`"
await ctx.send(to_post)
@commands.command()
async def copypastasell(self, ctx, ticker: str):
"""Generates a copypasta for StockStream using the given ticker."""
copypasta_list = ["Kreygasm MUST Kreygasm SELL Kreygasm {} Kreygasm THIS Kreygasm ROUND Kreygasm",
"Kreygasm TIME Kreygasm TO Kreygasm CASH Kreygasm IN Kreygasm {} Kreygasm",
"FutureMan SELL FutureMan {} FutureMan FOR FutureMan A FutureMan BRIGHTER FutureMan FUTURE FutureMan",
"Clappy Lemme sell a {0} before I send you a {0} Clappy",
"GivePLZ TRAIN TO PROFIT TOWN TakeNRG SELL {}! GivePLZ SELL {} TakeNRG",
"SELLING PogChamp {} PogChamp IS PogChamp OUR PogChamp LAST PogChamp HOPE PogChamp"]
to_post = f"Copypasta ready: `{secrets.choice(copypasta_list).format(ticker.upper())}`"
await ctx.send(to_post)
def setup(bot):
bot.add_cog(Stockstream(bot))
| mit | 6,933,392,645,307,308,000 | 53.2 | 128 | 0.627306 | false |
ttrifonov/horizon | horizon/horizon/dashboards/nova/access_and_security/security_groups/tests.py | 1 | 10057 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.conf import settings
from django.core.urlresolvers import reverse
from novaclient import exceptions as novaclient_exceptions
from novaclient.v1_1 import security_group_rules as nova_rules
from mox import IsA
from horizon import api
from horizon import test
from .tables import SecurityGroupsTable, RulesTable
SECGROUP_ID = '2'
INDEX_URL = reverse('horizon:nova:access_and_security:index')
SG_CREATE_URL = \
reverse('horizon:nova:access_and_security:security_groups:create')
SG_EDIT_RULE_URL = \
reverse('horizon:nova:access_and_security:security_groups:edit_rules',
args=[SECGROUP_ID])
def strip_absolute_base(uri):
return uri.split(settings.TESTSERVER, 1)[-1]
class SecurityGroupsViewTests(test.BaseViewTests):
def setUp(self):
super(SecurityGroupsViewTests, self).setUp()
sg1 = api.SecurityGroup(None)
sg1.id = 1
sg1.name = 'default'
sg2 = api.SecurityGroup(None)
sg2.id = 2
sg2.name = 'group_2'
rule = {'id': 1,
'ip_protocol': u"tcp",
'from_port': "80",
'to_port': "80",
'parent_group_id': "2",
'ip_range': {'cidr': "0.0.0.0/32"}}
manager = nova_rules.SecurityGroupRuleManager
rule_obj = nova_rules.SecurityGroupRule(manager, rule)
self.rules = [rule_obj]
sg1.rules = self.rules
sg2.rules = self.rules
self.security_groups = (sg1, sg2)
def test_create_security_groups_get(self):
res = self.client.get(SG_CREATE_URL)
self.assertTemplateUsed(res,
'nova/access_and_security/security_groups/create.html')
def test_create_security_groups_post(self):
SECGROUP_NAME = 'fakegroup'
SECGROUP_DESC = 'fakegroup_desc'
new_group = self.mox.CreateMock(api.SecurityGroup)
new_group.name = SECGROUP_NAME
formData = {'method': 'CreateGroup',
'tenant_id': self.TEST_TENANT,
'name': SECGROUP_NAME,
'description': SECGROUP_DESC,
}
self.mox.StubOutWithMock(api, 'security_group_create')
api.security_group_create(IsA(http.HttpRequest),
SECGROUP_NAME, SECGROUP_DESC).AndReturn(new_group)
self.mox.ReplayAll()
res = self.client.post(SG_CREATE_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_create_security_groups_post_exception(self):
SECGROUP_NAME = 'fakegroup'
SECGROUP_DESC = 'fakegroup_desc'
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
formData = {'method': 'CreateGroup',
'tenant_id': self.TEST_TENANT,
'name': SECGROUP_NAME,
'description': SECGROUP_DESC,
}
self.mox.StubOutWithMock(api, 'security_group_create')
api.security_group_create(IsA(http.HttpRequest),
SECGROUP_NAME, SECGROUP_DESC).AndRaise(exception)
self.mox.ReplayAll()
res = self.client.post(SG_CREATE_URL, formData)
self.assertTemplateUsed(res,
'nova/access_and_security/security_groups/create.html')
def test_edit_rules_get(self):
self.mox.StubOutWithMock(api, 'security_group_get')
api.security_group_get(IsA(http.HttpRequest), SECGROUP_ID).AndReturn(
self.security_groups[1])
self.mox.ReplayAll()
res = self.client.get(SG_EDIT_RULE_URL)
self.assertTemplateUsed(res,
'nova/access_and_security/security_groups/edit_rules.html')
self.assertItemsEqual(res.context['security_group'].name,
self.security_groups[1].name)
def test_edit_rules_get_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
self.mox.StubOutWithMock(api, 'security_group_get')
api.security_group_get(IsA(http.HttpRequest), SECGROUP_ID) \
.AndRaise(exception)
self.mox.ReplayAll()
res = self.client.get(SG_EDIT_RULE_URL)
self.assertRedirects(res, INDEX_URL)
def test_edit_rules_add_rule(self):
RULE_ID = '1'
FROM_PORT = '-1'
TO_PORT = '-1'
IP_PROTOCOL = 'icmp'
CIDR = '0.0.0.0/0'
new_rule = self.mox.CreateMock(api.SecurityGroup)
new_rule.from_port = FROM_PORT
new_rule.to_port = TO_PORT
new_rule.ip_protocol = IP_PROTOCOL
new_rule.cidr = CIDR
new_rule.security_group_id = SECGROUP_ID
new_rule.id = RULE_ID
formData = {'method': 'AddRule',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
'from_port': FROM_PORT,
'to_port': TO_PORT,
'ip_protocol': IP_PROTOCOL,
'cidr': CIDR}
self.mox.StubOutWithMock(api, 'security_group_rule_create')
api.security_group_rule_create(IsA(http.HttpRequest),
SECGROUP_ID, IP_PROTOCOL, FROM_PORT, TO_PORT, CIDR)\
.AndReturn(new_rule)
self.mox.ReplayAll()
res = self.client.post(SG_EDIT_RULE_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_edit_rules_add_rule_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
FROM_PORT = '-1'
TO_PORT = '-1'
IP_PROTOCOL = 'icmp'
CIDR = '0.0.0.0/0'
formData = {'method': 'AddRule',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
'from_port': FROM_PORT,
'to_port': TO_PORT,
'ip_protocol': IP_PROTOCOL,
'cidr': CIDR}
self.mox.StubOutWithMock(api, 'security_group_rule_create')
api.security_group_rule_create(IsA(http.HttpRequest),
SECGROUP_ID, IP_PROTOCOL, FROM_PORT,
TO_PORT, CIDR).AndRaise(exception)
self.mox.ReplayAll()
res = self.client.post(SG_EDIT_RULE_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_edit_rules_delete_rule(self):
RULE_ID = 1
self.mox.StubOutWithMock(api, 'security_group_rule_delete')
api.security_group_rule_delete(IsA(http.HttpRequest), RULE_ID)
self.mox.ReplayAll()
form_data = {"action": "rules__delete__%s" % RULE_ID}
req = self.factory.post(SG_EDIT_RULE_URL, form_data)
table = RulesTable(req, self.rules)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
def test_edit_rules_delete_rule_exception(self):
RULE_ID = 1
self.mox.StubOutWithMock(api, 'security_group_rule_delete')
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
api.security_group_rule_delete(IsA(http.HttpRequest), RULE_ID) \
.AndRaise(exception)
self.mox.ReplayAll()
form_data = {"action": "rules__delete__%s" % RULE_ID}
req = self.factory.post(SG_EDIT_RULE_URL, form_data)
table = RulesTable(req, self.rules)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
def test_delete_group(self):
self.mox.StubOutWithMock(api, 'security_group_delete')
api.security_group_delete(IsA(http.HttpRequest), 2)
self.mox.ReplayAll()
form_data = {"action": "security_groups__delete__%s" % '2'}
req = self.factory.post(INDEX_URL, form_data)
table = SecurityGroupsTable(req, self.security_groups)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
def test_delete_group_exception(self):
self.mox.StubOutWithMock(api, 'security_group_delete')
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
api.security_group_delete(IsA(http.HttpRequest), 2).\
AndRaise(exception)
self.mox.ReplayAll()
form_data = {"action": "security_groups__delete__%s" % '2'}
req = self.factory.post(INDEX_URL, form_data)
table = SecurityGroupsTable(req, self.security_groups)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
| apache-2.0 | -533,934,509,314,937,400 | 35.046595 | 79 | 0.586159 | false |
sonofeft/XYmath | xymath/gui/About_Dialog.py | 1 | 3671 | #!/usr/bin/env python
# -*- coding: ascii -*-
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import object
from tkinter import *
from PIL import Image, ImageTk
from xymath.gui.logo import logo_data
import webbrowser
import os
import sys
if sys.version_info < (3,):
from future import standard_library
standard_library.install_aliases()
from tkSimpleDialog import Dialog
else:
    # pylint running under python2 flags this import incorrectly; it is valid on python3
from tkinter.simpledialog import Dialog
here = os.path.abspath(os.path.dirname(__file__))
up_one = os.path.split( here )[0] # Needed to find xymath development version
exec( open(os.path.join( up_one,'_version.py' )).read() ) # creates local __version__ variable
class _Dialog(Dialog):
# use dialogOptions dictionary to set any values in the dialog
def __init__(self, parent, title = None, dialogOptions=None):
self.initComplete = 0
self.dialogOptions = dialogOptions
Dialog.__init__(self, parent, title)
class _About(_Dialog):
def body(self, master):
dialogframe = Frame(master, width=610, height=498)
dialogframe.pack()
self.Canvas_1 = Canvas(dialogframe, width=643, height=157)
self.Canvas_1.pack(anchor=N,side=TOP)
self.photo = PhotoImage(format="gif", data=logo_data)
self.Canvas_1.create_image(0, 0, image=self.photo, anchor=NW)
all_about = 'XYmath is an update of a Turbo Pascal project from my youth.\n' +\
'The above image is a screen shot of that original code.\n' +\
'\nAuthor: Charlie Taylor' + '\n' + 'Copyright (c) 2013 Charlie Taylor' + '\nLicense:' + \
'GPLv3'+ '\nVersion: ' + __version__+ '\nEmail: ' + \
"[email protected]"+ '\nStatus: ' + "4 - Beta"
self.Label_1 = Label(dialogframe,text=all_about, font=("Helvetica bold", 16))
self.Label_1.pack(anchor=NW, side=TOP, expand=1, fill=BOTH)
# LaunchBrowser Button
self.LaunchBrowser_Button = Button(dialogframe,text="Show XYmath Web Page",
font=("Helvetica bold", 16), bg='#000080', fg='#cccccc')
self.LaunchBrowser_Button.bind("<ButtonRelease-1>", self.LaunchBrowser_Button_Click)
self.LaunchBrowser_Button.pack(anchor=NW, side=TOP, expand=1, fill=X)
self.resizable(1,1) # Linux may not respect this
def LaunchBrowser_Button_Click(self, event=None):
webbrowser.open_new('https://sourceforge.net/p/xymath/xywiki/Home/')
def validate(self):
self.result = {} # return a dictionary of results
self.result["test"] = "test message"
return 1
def apply(self):
print('apply called')
class _Testdialog(object):
def __init__(self, master):
frame = Frame(master, width=300, height=300)
frame.pack()
self.master = master
self.x, self.y, self.w, self.h = -1,-1,-1,-1
self.Button_1 = Button(text="Test Dialog", relief="raised", width="15")
self.Button_1.place(x=84, y=36)
self.Button_1.bind("<ButtonRelease-1>", self.Button_1_Click)
def Button_1_Click(self, event): #click method for component ID=1
dialog = _About(self.master, "Test Dialog")
print('===============Result from Dialog====================')
print(dialog.result)
print('=====================================================')
def main():
root = Tk()
app = _Testdialog(root)
root.mainloop()
if __name__ == '__main__':
main()
| gpl-3.0 | 8,395,864,961,358,916,000 | 34.298077 | 100 | 0.620267 | false |
zaina/nova | nova/virt/libvirt/volume.py | 1 | 68849 | # Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import errno
import glob
import os
import platform
import re
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import strutils
import six
from six.moves import urllib
import six.moves.urllib.parse as urlparse
from nova.compute import arch
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import paths
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import quobyte
from nova.virt.libvirt import remotefs
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help='Number of times to rescan iSCSI target to find volume'),
cfg.IntOpt('num_iser_scan_tries',
default=5,
help='Number of times to rescan iSER target to find volume'),
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes'),
cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
                    'volumes'),
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the NFS volume is mounted on the'
' compute node'),
cfg.StrOpt('nfs_mount_options',
               help='Mount options passed to the NFS client. See the nfs '
                    'man page for details'),
cfg.StrOpt('smbfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the SMBFS shares are mounted on the '
'compute node'),
cfg.StrOpt('smbfs_mount_options',
default='',
help='Mount options passed to the SMBFS client. See '
'mount.cifs man page for details. Note that the '
'libvirt-qemu uid and gid must be specified.'),
cfg.IntOpt('num_aoe_discover_tries',
default=3,
help='Number of times to rediscover AoE target to find volume'),
cfg.StrOpt('glusterfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the glusterfs volume is mounted on the '
'compute node'),
cfg.BoolOpt('iscsi_use_multipath',
default=False,
help='Use multipath connection of the iSCSI volume'),
cfg.BoolOpt('iser_use_multipath',
default=False,
help='Use multipath connection of the iSER volume'),
cfg.StrOpt('scality_sofs_config',
help='Path or URL to Scality SOFS configuration file'),
cfg.StrOpt('scality_sofs_mount_point',
default='$state_path/scality',
help='Base dir where Scality SOFS shall be mounted'),
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help='Protocols listed here will be accessed directly '
'from QEMU. Currently supported protocols: [gluster]'),
cfg.StrOpt('quobyte_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the Quobyte volume is mounted on the '
'compute node'),
cfg.StrOpt('quobyte_client_cfg',
help='Path to a Quobyte Client configuration file.'),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help='The iSCSI transport iface to use to connect to target in '
'case offload support is desired. Default format is of '
'the form <transport_name>.<hwaddress> where '
'<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, '
'cxgb4i, qla4xxx, ocs) and <hwadress> is the MAC address '
'of the interface and can be generated via the '
'iscsiadm -m iface command. Do not confuse the '
'iscsi_iface parameter to be provided here with the '
'actual transport name.'),
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
"""Base class for volume drivers."""
def __init__(self, connection, is_block_dev):
self.connection = connection
self.is_block_dev = is_block_dev
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
conf.driver_name = libvirt_utils.pick_disk_driver_name(
self.connection._host.get_version(),
self.is_block_dev
)
conf.source_device = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
# Support for block size tuning
data = {}
if 'data' in connection_info:
data = connection_info['data']
if 'logical_block_size' in data:
conf.logical_block_size = data['logical_block_size']
if 'physical_block_size' in data:
conf.physical_block_size = data['physical_block_size']
# Extract rate_limit control parameters
if 'qos_specs' in data and data['qos_specs']:
tune_opts = ['total_bytes_sec', 'read_bytes_sec',
'write_bytes_sec', 'total_iops_sec',
'read_iops_sec', 'write_iops_sec']
specs = data['qos_specs']
if isinstance(specs, dict):
for k, v in six.iteritems(specs):
if k in tune_opts:
new_key = 'disk_' + k
setattr(conf, new_key, v)
else:
LOG.warn(_LW('Unknown content in connection_info/'
'qos_specs: %s'), specs)
# Extract access_mode control parameters
if 'access_mode' in data and data['access_mode']:
access_mode = data['access_mode']
if access_mode in ('ro', 'rw'):
conf.readonly = access_mode == 'ro'
else:
LOG.error(_LE('Unknown content in '
'connection_info/access_mode: %s'),
access_mode)
raise exception.InvalidVolumeAccessMode(
access_mode=access_mode)
return conf
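    # Illustrative connection_info['data'] fragment handled by get_config()
    # above (values are made up):
    #   {'qos_specs': {'total_bytes_sec': '102400', 'read_iops_sec': '200'},
    #    'access_mode': 'rw', 'logical_block_size': '4096'}
    # qos_specs entries become disk_* tuning attributes and access_mode 'ro'
    # marks the disk read-only.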
def _get_secret_uuid(self, conf, password=None):
secret = self.connection._host.find_secret(conf.source_protocol,
conf.source_name)
if secret is None:
secret = self.connection._host.create_secret(conf.source_protocol,
conf.source_name,
password)
return secret.UUIDString()
def _delete_secret_by_name(self, connection_info):
source_protocol = connection_info['driver_volume_type']
netdisk_properties = connection_info['data']
if source_protocol == 'rbd':
return
elif source_protocol == 'iscsi':
usage_type = 'iscsi'
usage_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
self.connection._host.delete_secret(usage_type, usage_name)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
pass
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by local file."""
def __init__(self, connection):
super(LibvirtVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach fake volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFakeVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFakeVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "network"
conf.source_protocol = "fake"
conf.source_name = "fake"
return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNetVolumeDriver,
self).get_config(connection_info, disk_info)
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_name = netdisk_properties.get('name')
conf.source_hosts = netdisk_properties.get('hosts', [])
conf.source_ports = netdisk_properties.get('ports', [])
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.libvirt.rbd_secret_uuid):
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.libvirt.rbd_user:
conf.auth_username = CONF.libvirt.rbd_user
if conf.source_protocol == 'iscsi':
try:
conf.source_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
target_portal = netdisk_properties['target_portal']
except KeyError:
raise exception.NovaException(_("Invalid volume source data"))
ip, port = utils.parse_server_string(target_portal)
if ip == '' or port == '':
raise exception.NovaException(_("Invalid target_lun"))
conf.source_hosts = [ip]
conf.source_ports = [port]
if netdisk_properties.get('auth_method') == 'CHAP':
auth_enabled = True
conf.auth_secret_type = 'iscsi'
password = netdisk_properties.get('auth_password')
conf.auth_secret_uuid = self._get_secret_uuid(conf, password)
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
conf.auth_secret_type = (conf.auth_secret_type or
netdisk_properties['secret_type'])
conf.auth_secret_uuid = (conf.auth_secret_uuid or
netdisk_properties['secret_uuid'])
return conf
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
super(LibvirtNetVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
self._delete_secret_by_name(connection_info)
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
supported_transports = ['be2iscsi', 'bnx2i', 'cxgb3i',
'cxgb4i', 'qla4xxx', 'ocs']
def __init__(self, connection):
super(LibvirtISCSIVolumeDriver, self).__init__(connection,
is_block_dev=True)
self.num_scan_tries = CONF.libvirt.num_iscsi_scan_tries
self.use_multipath = CONF.libvirt.iscsi_use_multipath
if CONF.libvirt.iscsi_iface:
self.transport = CONF.libvirt.iscsi_iface
else:
self.transport = 'default'
def _get_transport(self):
if self._validate_transport(self.transport):
return self.transport
else:
return 'default'
def _validate_transport(self, transport_iface):
"""Check that given iscsi_iface uses only supported transports
Accepted transport names for provided iface param are
        be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx and ocs. iSER uses its
own separate driver. Note the difference between transport and
iface; unlike iscsi_tcp/iser, this is not one and the same for
offloaded transports, where the default format is
transport_name.hwaddress
"""
# We can support iser here as well, but currently reject it as the
# separate iser driver has not yet been deprecated.
if transport_iface == 'default':
return True
# Will return (6) if iscsi_iface file was not found, or (2) if iscsid
# could not be contacted
out = self._run_iscsiadm_bare(['-m',
'iface',
'-I',
transport_iface],
check_exit_code=[0, 2, 6])[0] or ""
LOG.debug("iscsiadm %(iface)s configuration: stdout=%(out)s",
{'iface': transport_iface, 'out': out})
for data in [line.split() for line in out.splitlines()]:
if data[0] == 'iface.transport_name':
if data[2] in self.supported_transports:
return True
LOG.warn(_LW("No useable transport found for iscsi iface %s. "
"Falling back to default transport"),
transport_iface)
return False
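    # Editor's note (hypothetical output, not captured from a real host): the
    # loop in _validate_transport() above only cares about the
    # 'iface.transport_name' line of the iscsiadm iface listing. For an
    # offloaded iface named e.g. 'bnx2i.00:0a:0b:0c:0d:0e' that line would be
    #
    #   iface.transport_name = bnx2i
    #
    # and, since 'bnx2i' is in supported_transports, the iface is accepted.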
def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
iscsi_properties['target_iqn'],
'-p', iscsi_properties['target_portal'],
*iscsi_command, run_as_root=True,
check_exit_code=check_exit_code)
msg = ('iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s' %
{'command': iscsi_command, 'out': out, 'err': err})
# NOTE(bpokorny): iscsi_command can contain passwords so we need to
# sanitize the password in the message.
LOG.debug(strutils.mask_password(msg))
return (out, err)
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
**kwargs):
iscsi_command = ('--op', 'update', '-n', property_key,
'-v', property_value)
return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
def _get_target_portals_from_iscsiadm_output(self, output):
# return both portals and iqns
#
# as we are parsing a command line utility, allow for the
# possibility that additional debug data is spewed in the
# stream, and only grab actual ip / iqn lines.
targets = []
for data in [line.split() for line in output.splitlines()]:
if len(data) == 2 and data[1].startswith('iqn.'):
targets.append(data)
return targets
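    # Editor's note (made-up portals/IQNs): given sendtargets output such as
    #
    #   10.0.0.2:3260,1 iqn.2010-10.org.openstack:volume-0001
    #   10.0.0.3:3260,1 iqn.2010-10.org.openstack:volume-0001
    #
    # the parser above returns
    #   [['10.0.0.2:3260,1', 'iqn.2010-10.org.openstack:volume-0001'],
    #    ['10.0.0.3:3260,1', 'iqn.2010-10.org.openstack:volume-0001']]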
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
@utils.synchronized('connect_volume')
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
iscsi_properties = connection_info['data']
# multipath installed, discovering other targets if available
# multipath should be configured on the nova-compute node,
# in order to fit storage vendor
if self.use_multipath:
out = self._run_iscsiadm_discover(iscsi_properties)
# There are two types of iSCSI multipath devices. One which shares
            # the same iqn between multiple portals, and the other which uses
            # different iqns on different portals. Try to identify the type
            # by checking, in the iscsiadm output, whether the iqn is used by
            # multiple portals. If it is, it's the former, so use the
            # supplied iqn. Otherwise, it's the latter, so try the ip,iqn
            # combinations to find the targets which constitute the multipath
            # device.
ips_iqns = self._get_target_portals_from_iscsiadm_output(out)
same_portal = False
all_portals = set()
match_portals = set()
for ip, iqn in ips_iqns:
all_portals.add(ip)
if iqn == iscsi_properties['target_iqn']:
match_portals.add(ip)
if len(all_portals) == len(match_portals):
same_portal = True
for ip, iqn in ips_iqns:
props = iscsi_properties.copy()
props['target_portal'] = ip.split(",")[0]
if not same_portal:
props['target_iqn'] = iqn
self._connect_to_iscsi_portal(props)
self._rescan_iscsi()
else:
self._connect_to_iscsi_portal(iscsi_properties)
# Detect new/resized LUNs for existing sessions
self._run_iscsiadm(iscsi_properties, ("--rescan",))
host_device = self._get_host_device(iscsi_properties)
# The /dev/disk/by-path/... node is not always present immediately
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
disk_dev = disk_info['dev']
# Check host_device only when transport is used, since otherwise it is
# directly derived from properties. Only needed for unit tests
while ((self._get_transport() != "default" and not host_device)
or not os.path.exists(host_device)):
if tries >= self.num_scan_tries:
raise exception.NovaException(_("iSCSI device not found at %s")
% (host_device))
LOG.warn(_LW("ISCSI volume not yet found at: %(disk_dev)s. "
"Will rescan & retry. Try number: %(tries)s"),
{'disk_dev': disk_dev, 'tries': tries})
# The rescan isn't documented as being necessary(?), but it helps
self._run_iscsiadm(iscsi_properties, ("--rescan",))
# For offloaded open-iscsi transports, host_device cannot be
# guessed unlike iscsi_tcp where it can be obtained from
# properties, so try and get it again.
if not host_device and self._get_transport() != "default":
host_device = self._get_host_device(iscsi_properties)
tries = tries + 1
if not host_device or not os.path.exists(host_device):
time.sleep(tries ** 2)
if tries != 0:
LOG.debug("Found iSCSI node %(disk_dev)s "
"(after %(tries)s rescans)",
{'disk_dev': disk_dev,
'tries': tries})
if self.use_multipath:
# we use the multipath device instead of the single path device
self._rescan_multipath()
multipath_device = self._get_multipath_device_name(host_device)
if multipath_device is not None:
host_device = multipath_device
connection_info['data']['multipath_id'] = \
multipath_device.split('/')[-1]
connection_info['data']['device_path'] = host_device
def _run_iscsiadm_discover(self, iscsi_properties):
def run_iscsiadm_update_discoverydb():
return utils.execute(
'iscsiadm',
'-m', 'discoverydb',
'-t', 'sendtargets',
'-p', iscsi_properties['target_portal'],
'--op', 'update',
'-n', "discovery.sendtargets.auth.authmethod",
'-v', iscsi_properties['discovery_auth_method'],
'-n', "discovery.sendtargets.auth.username",
'-v', iscsi_properties['discovery_auth_username'],
'-n', "discovery.sendtargets.auth.password",
'-v', iscsi_properties['discovery_auth_password'],
run_as_root=True)
out = None
if iscsi_properties.get('discovery_auth_method'):
try:
run_iscsiadm_update_discoverydb()
except processutils.ProcessExecutionError as exc:
# iscsiadm returns 6 for "db record not found"
if exc.exit_code == 6:
(out, err) = utils.execute(
'iscsiadm',
'-m', 'discoverydb',
'-t', 'sendtargets',
'-p', iscsi_properties['target_portal'],
'--op', 'new',
run_as_root=True)
run_iscsiadm_update_discoverydb()
else:
raise
out = self._run_iscsiadm_bare(
['-m',
'discoverydb',
'-t',
'sendtargets',
'-p',
iscsi_properties['target_portal'],
'--discover'],
check_exit_code=[0, 255])[0] or ""
else:
out = self._run_iscsiadm_bare(
['-m',
'discovery',
'-t',
'sendtargets',
'-p',
iscsi_properties['target_portal']],
check_exit_code=[0, 255])[0] or ""
return out
@utils.synchronized('connect_volume')
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
iscsi_properties = connection_info['data']
host_device = self._get_host_device(iscsi_properties)
multipath_device = None
if self.use_multipath:
if 'multipath_id' in iscsi_properties:
multipath_device = ('/dev/mapper/%s' %
iscsi_properties['multipath_id'])
else:
multipath_device = self._get_multipath_device_name(host_device)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
if self.use_multipath and multipath_device:
return self._disconnect_volume_multipath_iscsi(iscsi_properties,
multipath_device)
# NOTE(vish): Only disconnect from the target if no luns from the
# target are in use.
device_byname = ("ip-%s-iscsi-%s-lun-" %
(iscsi_properties['target_portal'],
iscsi_properties['target_iqn']))
devices = self.connection._get_all_block_devices()
devices = [dev for dev in devices if (device_byname in dev
and
dev.startswith(
'/dev/disk/by-path/'))]
if not devices:
self._disconnect_from_iscsi_portal(iscsi_properties)
elif host_device not in devices:
# Delete device if LUN is not in use by another instance
self._delete_device(host_device)
def _delete_device(self, device_path):
device_name = os.path.basename(os.path.realpath(device_path))
delete_control = '/sys/block/' + device_name + '/device/delete'
if os.path.exists(delete_control):
# Copy '1' from stdin to the device delete control file
utils.execute('cp', '/dev/stdin', delete_control,
process_input='1', run_as_root=True)
else:
LOG.warn(_LW("Unable to delete volume device %s"), device_name)
def _remove_multipath_device_descriptor(self, disk_descriptor):
disk_descriptor = disk_descriptor.replace('/dev/mapper/', '')
try:
self._run_multipath(['-f', disk_descriptor],
check_exit_code=[0, 1])
except processutils.ProcessExecutionError as exc:
# Because not all cinder drivers need to remove the dev mapper,
# here just logs a warning to avoid affecting those drivers in
# exceptional cases.
LOG.warn(_LW('Failed to remove multipath device descriptor '
'%(dev_mapper)s. Exception message: %(msg)s')
% {'dev_mapper': disk_descriptor,
'msg': exc.message})
def _disconnect_volume_multipath_iscsi(self, iscsi_properties,
multipath_device):
self._rescan_multipath()
block_devices = self.connection._get_all_block_devices()
devices = []
for dev in block_devices:
if "/mapper/" in dev:
devices.append(dev)
else:
mpdev = self._get_multipath_device_name(dev)
if mpdev:
devices.append(mpdev)
# Do a discovery to find all targets.
# Targets for multiple paths for the same multipath device
# may not be the same.
out = self._run_iscsiadm_discover(iscsi_properties)
# Extract targets for the current multipath device.
ips_iqns = []
entries = self._get_iscsi_devices()
for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
ip_iqn = "%s-iscsi-%s" % (ip.split(",")[0], iqn)
for entry in entries:
entry_ip_iqn = entry.split("-lun-")[0]
if entry_ip_iqn[:3] == "ip-":
entry_ip_iqn = entry_ip_iqn[3:]
elif entry_ip_iqn[:4] == "pci-":
# Look at an offset of len('pci-0000:00:00.0')
offset = entry_ip_iqn.find("ip-", 16, 21)
entry_ip_iqn = entry_ip_iqn[(offset + 3):]
if (ip_iqn != entry_ip_iqn):
continue
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" %
entry)
entry_mpdev = self._get_multipath_device_name(entry_real_path)
if entry_mpdev == multipath_device:
ips_iqns.append([ip, iqn])
break
if not devices:
# disconnect if no other multipath devices
self._disconnect_mpath(iscsi_properties, ips_iqns)
return
# Get a target for all other multipath devices
other_iqns = [self._get_multipath_iqn(device)
for device in devices]
# Get all the targets for the current multipath device
current_iqns = [iqn for ip, iqn in ips_iqns]
in_use = False
for current in current_iqns:
if current in other_iqns:
in_use = True
break
# If no other multipath device attached has the same iqn
# as the current device
if not in_use:
# disconnect if no other multipath devices with same iqn
self._disconnect_mpath(iscsi_properties, ips_iqns)
return
elif multipath_device not in devices:
# delete the devices associated w/ the unused multipath
self._delete_mpath(iscsi_properties, multipath_device, ips_iqns)
# else do not disconnect iscsi portals,
# as they are used for other luns,
# just remove multipath mapping device descriptor
self._remove_multipath_device_descriptor(multipath_device)
return
def _connect_to_iscsi_portal(self, iscsi_properties):
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
# run --op new. Therefore, we check to see if the
# target exists, and if we get 255 (Not Found), then
# we run --op new. This will also happen if another
# volume is using the same target.
try:
self._run_iscsiadm(iscsi_properties, ())
except processutils.ProcessExecutionError as exc:
# iscsiadm returns 21 for "No records found" after version 2.0-871
if exc.exit_code in [21, 255]:
self._reconnect(iscsi_properties)
else:
raise
if iscsi_properties.get('auth_method'):
self._iscsiadm_update(iscsi_properties,
"node.session.auth.authmethod",
iscsi_properties['auth_method'])
self._iscsiadm_update(iscsi_properties,
"node.session.auth.username",
iscsi_properties['auth_username'])
self._iscsiadm_update(iscsi_properties,
"node.session.auth.password",
iscsi_properties['auth_password'])
# duplicate logins crash iscsiadm after load,
# so we scan active sessions to see if the node is logged in.
out = self._run_iscsiadm_bare(["-m", "session"],
run_as_root=True,
check_exit_code=[0, 1, 21])[0] or ""
portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
for p in out.splitlines() if p.startswith("tcp:")]
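        # Editor's note (example values): each "-m session" line looks like
        #   tcp: [1] 10.0.0.2:3260,1 iqn.2010-10.org.openstack:volume-0001
        # so split(" ")[2] is the portal and split(" ")[3] is the iqn.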
stripped_portal = iscsi_properties['target_portal'].split(",")[0]
if len(portals) == 0 or len([s for s in portals
if stripped_portal ==
s['portal'].split(",")[0]
and
s['iqn'] ==
iscsi_properties['target_iqn']]
) == 0:
try:
self._run_iscsiadm(iscsi_properties,
("--login",),
check_exit_code=[0, 255])
except processutils.ProcessExecutionError as err:
# as this might be one of many paths,
# only set successful logins to startup automatically
if err.exit_code in [15]:
self._iscsiadm_update(iscsi_properties,
"node.startup",
"automatic")
return
self._iscsiadm_update(iscsi_properties,
"node.startup",
"automatic")
def _disconnect_from_iscsi_portal(self, iscsi_properties):
self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
check_exit_code=[0, 21, 255])
self._run_iscsiadm(iscsi_properties, ("--logout",),
check_exit_code=[0, 21, 255])
self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
check_exit_code=[0, 21, 255])
def _get_multipath_device_name(self, single_path_device):
device = os.path.realpath(single_path_device)
out = self._run_multipath(['-ll',
device],
check_exit_code=[0, 1])[0]
mpath_line = [line for line in out.splitlines()
if "scsi_id" not in line] # ignore udev errors
if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
return None
def _get_iscsi_devices(self):
try:
devices = list(os.walk('/dev/disk/by-path'))[0][-1]
except IndexError:
return []
iscsi_devs = []
for entry in devices:
if (entry.startswith("ip-") or
(entry.startswith('pci-') and 'ip-' in entry)):
iscsi_devs.append(entry)
return iscsi_devs
def _delete_mpath(self, iscsi_properties, multipath_device, ips_iqns):
entries = self._get_iscsi_devices()
# Loop through ips_iqns to construct all paths
iqn_luns = []
for ip, iqn in ips_iqns:
iqn_lun = '%s-lun-%s' % (iqn,
iscsi_properties.get('target_lun', 0))
iqn_luns.append(iqn_lun)
for dev in ['/dev/disk/by-path/%s' % dev for dev in entries]:
for iqn_lun in iqn_luns:
if iqn_lun in dev:
self._delete_device(dev)
self._rescan_multipath()
def _disconnect_mpath(self, iscsi_properties, ips_iqns):
for ip, iqn in ips_iqns:
props = iscsi_properties.copy()
props['target_portal'] = ip
props['target_iqn'] = iqn
self._disconnect_from_iscsi_portal(props)
self._rescan_multipath()
def _get_multipath_iqn(self, multipath_device):
entries = self._get_iscsi_devices()
for entry in entries:
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
entry_multipath = self._get_multipath_device_name(entry_real_path)
if entry_multipath == multipath_device:
return entry.split("iscsi-")[1].split("-lun")[0]
return None
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('iscsiadm',
*iscsi_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
{'command': iscsi_command, 'out': out, 'err': err})
return (out, err)
def _run_multipath(self, multipath_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('multipath',
*multipath_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("multipath %(command)s: stdout=%(out)s stderr=%(err)s",
{'command': multipath_command, 'out': out, 'err': err})
return (out, err)
def _rescan_iscsi(self):
self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
check_exit_code=[0, 1, 21, 255])
self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
check_exit_code=[0, 1, 21, 255])
def _rescan_multipath(self):
self._run_multipath(['-r'], check_exit_code=[0, 1, 21])
def _get_host_device(self, transport_properties):
"""Find device path in devtemfs."""
device = ("ip-%s-iscsi-%s-lun-%s" %
(transport_properties['target_portal'],
transport_properties['target_iqn'],
transport_properties.get('target_lun', 0)))
if self._get_transport() == "default":
return ("/dev/disk/by-path/%s" % device)
else:
host_device = None
look_for_device = glob.glob('/dev/disk/by-path/*%s' % device)
if look_for_device:
host_device = look_for_device[0]
return host_device
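    # Editor's note (hypothetical values): with the default transport,
    # _get_host_device() above returns a path of the form
    #   /dev/disk/by-path/ip-10.0.0.2:3260-iscsi-iqn.2010-10.org.openstack:volume-0001-lun-1
    # Offloaded transports prefix the node with the HBA's pci address, which
    # is why the glob lookup is needed in that case.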
def _reconnect(self, iscsi_properties):
# Note: iscsiadm does not support changing iface.iscsi_ifacename
# via --op update, so we do this at creation time
self._run_iscsiadm(iscsi_properties,
('--interface', self._get_transport(),
'--op', 'new'))
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISERVolumeDriver, self).__init__(connection)
self.num_scan_tries = CONF.libvirt.num_iser_scan_tries
self.use_multipath = CONF.libvirt.iser_use_multipath
def _get_transport(self):
return 'iser'
def _get_multipath_iqn(self, multipath_device):
entries = self._get_iscsi_devices()
for entry in entries:
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
entry_multipath = self._get_multipath_device_name(entry_real_path)
if entry_multipath == multipath_device:
return entry.split("iser-")[1].split("-lun")[0]
return None
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, connection):
"""Create back-end to nfs."""
super(LibvirtNFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(connection_info['data']['export']))
path = os.path.join(path, connection_info['data']['name'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
options = connection_info['data'].get('options')
self._ensure_mounted(connection_info['data']['export'], options)
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
export = connection_info['data']['export']
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(export))
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ('device is busy' in exc.message or
'target is busy' in exc.message):
LOG.debug("The NFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
def _ensure_mounted(self, nfs_export, options=None):
"""@type nfs_export: string
@type options: string
"""
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(nfs_export))
if not libvirt_utils.is_mounted(mount_path, nfs_export):
self._mount_nfs(mount_path, nfs_export, options, ensure=True)
return mount_path
def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
"""Mount nfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
# Construct the NFS mount command.
nfs_cmd = ['mount', '-t', 'nfs']
if CONF.libvirt.nfs_mount_options is not None:
nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
if options:
nfs_cmd.extend(options.split(' '))
nfs_cmd.extend([nfs_share, mount_path])
try:
utils.execute(*nfs_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_LW("%s is already mounted"), nfs_share)
else:
raise
class LibvirtSMBFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for SMBFS."""
def __init__(self, connection):
super(LibvirtSMBFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
self.username_regex = re.compile(
r"(user(?:name)?)=(?:[^ ,]+\\)?([^ ,]+)")
def _get_device_path(self, connection_info):
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(smbfs_share)
volume_path = os.path.join(mount_path,
connection_info['data']['name'])
return volume_path
def _get_mount_path(self, smbfs_share):
mount_path = os.path.join(CONF.libvirt.smbfs_mount_point_base,
utils.get_hash_str(smbfs_share))
return mount_path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtSMBFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.driver_cache = 'writethrough'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(smbfs_share)
if not libvirt_utils.is_mounted(mount_path, smbfs_share):
mount_options = self._parse_mount_options(connection_info)
remotefs.mount_share(mount_path, smbfs_share,
export_type='cifs', options=mount_options)
device_path = self._get_device_path(connection_info)
connection_info['data']['device_path'] = device_path
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(smbfs_share)
remotefs.unmount_share(mount_path, smbfs_share)
def _parse_mount_options(self, connection_info):
mount_options = " ".join(
[connection_info['data'].get('options') or '',
CONF.libvirt.smbfs_mount_options])
if not self.username_regex.findall(mount_options):
mount_options = mount_options + ' -o username=guest'
else:
# Remove the Domain Name from user name
mount_options = self.username_regex.sub(r'\1=\2', mount_options)
return mount_options.strip(", ").split(' ')
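    # Editor's note (made-up values): sketch of what _parse_mount_options()
    # above does. An option string such as
    #   'username=MYDOMAIN\alice vers=3.0'
    # is rewritten to 'username=alice vers=3.0' (the domain prefix is
    # stripped); if no username option is present at all, '-o username=guest'
    # is appended instead.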
class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach AoE volumes to libvirt."""
def __init__(self, connection):
super(LibvirtAOEVolumeDriver,
self).__init__(connection, is_block_dev=True)
def _aoe_discover(self):
"""Call aoe-discover (aoe-tools) AoE Discover."""
(out, err) = utils.execute('aoe-discover',
run_as_root=True, check_exit_code=0)
return (out, err)
def _aoe_revalidate(self, aoedev):
"""Revalidate the LUN Geometry (When an AoE ID is reused)."""
(out, err) = utils.execute('aoe-revalidate', aoedev,
run_as_root=True, check_exit_code=0)
return (out, err)
def _get_device_path(self, connection_info):
shelf = connection_info['data']['target_shelf']
lun = connection_info['data']['target_lun']
aoedev = 'e%s.%s' % (shelf, lun)
aoedevpath = '/dev/etherd/%s' % (aoedev)
return aoedevpath
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtAOEVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def connect_volume(self, connection_info, mount_device):
shelf = connection_info['data']['target_shelf']
lun = connection_info['data']['target_lun']
aoedev = 'e%s.%s' % (shelf, lun)
aoedevpath = '/dev/etherd/%s' % (aoedev)
if os.path.exists(aoedevpath):
# NOTE(jbr_): If aoedevpath already exists, revalidate the LUN.
self._aoe_revalidate(aoedev)
else:
# NOTE(jbr_): If aoedevpath does not exist, do a discover.
self._aoe_discover()
# NOTE(jbr_): Device path is not always present immediately
def _wait_for_device_discovery(aoedevpath, mount_device):
tries = self.tries
if os.path.exists(aoedevpath):
raise loopingcall.LoopingCallDone()
if self.tries >= CONF.libvirt.num_aoe_discover_tries:
raise exception.NovaException(_("AoE device not found at %s") %
(aoedevpath))
LOG.warn(_LW("AoE volume not yet found at: %(aoedevpath)s. "
"Try number: %(tries)s"),
{'aoedevpath': aoedevpath, 'tries': tries})
self._aoe_discover()
self.tries = self.tries + 1
self.tries = 0
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_device_discovery, aoedevpath, mount_device)
timer.start(interval=2).wait()
tries = self.tries
if tries != 0:
LOG.debug("Found AoE device %(aoedevpath)s "
"(after %(tries)s rediscover)",
{'aoedevpath': aoedevpath,
'tries': tries})
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for GlusterFS."""
def __init__(self, connection):
"""Create back-end to glusterfs."""
super(LibvirtGlusterfsVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
utils.get_hash_str(connection_info['data']['export']))
path = os.path.join(path, connection_info['data']['name'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtGlusterfsVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
vol_name = data['export'].split('/')[1]
source_host = data['export'].split('/')[0][:-1]
conf.source_ports = ['24007']
conf.source_type = 'network'
conf.source_protocol = 'gluster'
conf.source_hosts = [source_host]
conf.source_name = '%s/%s' % (vol_name, data['name'])
else:
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, mount_device):
data = connection_info['data']
if 'gluster' not in CONF.libvirt.qemu_allowed_storage_drivers:
self._ensure_mounted(data['export'], data.get('options'))
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
return
export = connection_info['data']['export']
mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
utils.get_hash_str(export))
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.message:
LOG.debug("The GlusterFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the GlusterFS share %s"),
export)
def _ensure_mounted(self, glusterfs_export, options=None):
"""@type glusterfs_export: string
@type options: string
"""
mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
utils.get_hash_str(glusterfs_export))
if not libvirt_utils.is_mounted(mount_path, glusterfs_export):
self._mount_glusterfs(mount_path, glusterfs_export,
options, ensure=True)
return mount_path
def _mount_glusterfs(self, mount_path, glusterfs_share,
options=None, ensure=False):
"""Mount glusterfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
gluster_cmd = ['mount', '-t', 'glusterfs']
if options is not None:
gluster_cmd.extend(options.split(' '))
gluster_cmd.extend([glusterfs_share, mount_path])
try:
utils.execute(*gluster_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_LW("%s is already mounted"), glusterfs_share)
else:
raise
class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Fibre Channel Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFibreChannelVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_pci_num(self, hba):
# NOTE(walter-boring)
# device path is in format of
# /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
# sometimes an extra entry exists before the host2 value
# we always want the value prior to the host2 value
pci_num = None
if hba is not None:
if "device_path" in hba:
index = 0
device_path = hba['device_path'].split('/')
for value in device_path:
if value.startswith('host'):
break
index = index + 1
if index > 0:
pci_num = device_path[index - 1]
return pci_num
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFibreChannelVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def _get_lun_string_for_s390(self, lun):
target_lun = 0
if lun < 256:
target_lun = "0x00%02x000000000000" % lun
elif lun <= 0xffffffff:
target_lun = "0x%08x00000000" % lun
return target_lun
def _get_device_file_path_s390(self, pci_num, target_wwn, lun):
"""Returns device file path"""
# NOTE the format of device file paths depends on the system
# architecture. Most architectures use a PCI based format.
# Systems following the S390, or S390x architecture use a format
# which is based upon the inherent channel architecture (ccw).
host_device = ("/dev/disk/by-path/ccw-%s-zfcp-%s:%s" %
(pci_num,
target_wwn,
lun))
return host_device
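    # Editor's note (illustrative values only): for lun=2,
    # _get_lun_string_for_s390() yields '0x0002000000000000'; with pci_num
    # '0.0.5000' and target_wwn '0x5005076801401234',
    # _get_device_file_path_s390() above would return
    #   /dev/disk/by-path/ccw-0.0.5000-zfcp-0x5005076801401234:0x0002000000000000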
def _remove_lun_from_s390(self, connection_info):
"""Rempove lun from s390 configuration"""
# If LUN scanning is turned off on systems following the s390, or
# s390x architecture LUNs need to be removed from the configuration
# using the unit_remove call. The unit_remove call needs to be issued
# for each (virtual) HBA and target_port.
fc_properties = connection_info['data']
lun = int(fc_properties.get('target_lun', 0))
target_lun = self._get_lun_string_for_s390(lun)
ports = fc_properties['target_wwn']
for device_num, target_wwn in self._get_possible_devices(ports):
libvirt_utils.perform_unit_remove_for_s390(device_num,
target_wwn,
target_lun)
def _get_possible_devices(self, wwnports):
"""Compute the possible valid fiber channel device options.
:param wwnports: possible wwn addresses. Can either be string
or list of strings.
:returns: list of (pci_id, wwn) tuples
Given one or more wwn (mac addresses for fiber channel) ports
do the matrix math to figure out a set of pci device, wwn
tuples that are potentially valid (they won't all be). This
provides a search space for the device connection.
"""
# the wwn (think mac addresses for fiber channel devices) can
# either be a single value or a list. Normalize it to a list
# for further operations.
wwns = []
if isinstance(wwnports, list):
for wwn in wwnports:
wwns.append(str(wwn))
elif isinstance(wwnports, six.string_types):
wwns.append(str(wwnports))
raw_devices = []
hbas = libvirt_utils.get_fc_hbas_info()
for hba in hbas:
pci_num = self._get_pci_num(hba)
if pci_num is not None:
for wwn in wwns:
target_wwn = "0x%s" % wwn.lower()
raw_devices.append((pci_num, target_wwn))
return raw_devices
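    # Editor's note (hypothetical HBAs/WWNs): with two local HBAs at pci
    # addresses 0000:05:00.2 and 0000:05:00.3 and wwnports
    # ['500507680140abcd'], _get_possible_devices() above returns
    #   [('0000:05:00.2', '0x500507680140abcd'),
    #    ('0000:05:00.3', '0x500507680140abcd')]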
@utils.synchronized('connect_volume')
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
fc_properties = connection_info['data']
mount_device = disk_info["dev"]
possible_devs = self._get_possible_devices(fc_properties['target_wwn'])
# map the raw device possibilities to possible host device paths
host_devices = []
for device in possible_devs:
pci_num, target_wwn = device
if platform.machine() in (arch.S390, arch.S390X):
target_lun = self._get_lun_string_for_s390(
fc_properties.get('target_lun', 0))
host_device = self._get_device_file_path_s390(
pci_num,
target_wwn,
target_lun)
libvirt_utils.perform_unit_add_for_s390(
pci_num, target_wwn, target_lun)
else:
host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
(pci_num,
target_wwn,
fc_properties.get('target_lun', 0)))
host_devices.append(host_device)
if len(host_devices) == 0:
# this is empty because we don't have any FC HBAs
msg = _("We are unable to locate any Fibre Channel devices")
raise exception.NovaException(msg)
# The /dev/disk/by-path/... node is not always present immediately
# We only need to find the first device. Once we see the first device
# multipath will have any others.
def _wait_for_device_discovery(host_devices, mount_device):
tries = self.tries
for device in host_devices:
LOG.debug("Looking for Fibre Channel dev %(device)s",
{'device': device})
if os.path.exists(device):
self.host_device = device
# get the /dev/sdX device. This is used
# to find the multipath device.
self.device_name = os.path.realpath(device)
raise loopingcall.LoopingCallDone()
if self.tries >= CONF.libvirt.num_iscsi_scan_tries:
msg = _("Fibre Channel device not found.")
raise exception.NovaException(msg)
LOG.warn(_LW("Fibre volume not yet found at: %(mount_device)s. "
"Will rescan & retry. Try number: %(tries)s"),
{'mount_device': mount_device, 'tries': tries})
linuxscsi.rescan_hosts(libvirt_utils.get_fc_hbas_info())
self.tries = self.tries + 1
self.host_device = None
self.device_name = None
self.tries = 0
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_device_discovery, host_devices, mount_device)
timer.start(interval=2).wait()
tries = self.tries
if self.host_device is not None and self.device_name is not None:
LOG.debug("Found Fibre Channel volume %(mount_device)s "
"(after %(tries)s rescans)",
{'mount_device': mount_device,
'tries': tries})
# see if the new drive is part of a multipath
# device. If so, we'll use the multipath device.
mdev_info = linuxscsi.find_multipath_device(self.device_name)
if mdev_info is not None:
LOG.debug("Multipath device discovered %(device)s",
{'device': mdev_info['device']})
device_path = mdev_info['device']
connection_info['data']['device_path'] = device_path
connection_info['data']['devices'] = mdev_info['devices']
connection_info['data']['multipath_id'] = mdev_info['id']
else:
# we didn't find a multipath device.
# so we assume the kernel only sees 1 device
device_path = self.host_device
device_info = linuxscsi.get_device_info(self.device_name)
connection_info['data']['device_path'] = device_path
connection_info['data']['devices'] = [device_info]
@utils.synchronized('connect_volume')
def disconnect_volume(self, connection_info, mount_device):
"""Detach the volume from instance_name."""
super(LibvirtFibreChannelVolumeDriver,
self).disconnect_volume(connection_info, mount_device)
# If this is a multipath device, we need to search again
# and make sure we remove all the devices. Some of them
# might not have shown up at attach time.
if 'multipath_id' in connection_info['data']:
multipath_id = connection_info['data']['multipath_id']
mdev_info = linuxscsi.find_multipath_device(multipath_id)
devices = mdev_info['devices'] if mdev_info else []
LOG.debug("devices to remove = %s", devices)
else:
# only needed when multipath-tools work improperly
devices = connection_info['data'].get('devices', [])
LOG.warn(_LW("multipath-tools probably work improperly. "
"devices to remove = %s.") % devices)
# There may have been more than 1 device mounted
# by the kernel for this volume. We have to remove
# all of them
for device in devices:
linuxscsi.remove_device(device)
if platform.machine() in (arch.S390, arch.S390X):
self._remove_lun_from_s390(connection_info)
class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
"""Scality SOFS Nova driver. Provide hypervisors with access
to sparse files on SOFS.
"""
def __init__(self, connection):
"""Create back-end to SOFS and check connection."""
super(LibvirtScalityVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.scality_sofs_mount_point,
connection_info['data']['sofs_path'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtScalityVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
# The default driver cache policy is 'none', and this causes
# qemu/kvm to open the volume file with O_DIRECT, which is
# rejected by FUSE (on kernels older than 3.3). Scality SOFS
# is FUSE based, so we must provide a more sensible default.
conf.driver_cache = 'writethrough'
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
self._check_prerequisites()
self._mount_sofs()
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def _check_prerequisites(self):
"""Sanity checks before attempting to mount SOFS."""
# config is mandatory
config = CONF.libvirt.scality_sofs_config
if not config:
msg = _("Value required for 'scality_sofs_config'")
LOG.warn(msg)
raise exception.NovaException(msg)
# config can be a file path or a URL, check it
if urlparse.urlparse(config).scheme == '':
# turn local path into URL
config = 'file://%s' % config
try:
urllib.request.urlopen(config, timeout=5).close()
except urllib.error.URLError as e:
msg = _("Cannot access 'scality_sofs_config': %s") % e
LOG.warn(msg)
raise exception.NovaException(msg)
# mount.sofs must be installed
if not os.access('/sbin/mount.sofs', os.X_OK):
msg = _("Cannot execute /sbin/mount.sofs")
LOG.warn(msg)
raise exception.NovaException(msg)
def _mount_sofs(self):
config = CONF.libvirt.scality_sofs_config
mount_path = CONF.libvirt.scality_sofs_mount_point
sysdir = os.path.join(mount_path, 'sys')
if not os.path.isdir(mount_path):
utils.execute('mkdir', '-p', mount_path)
if not os.path.isdir(sysdir):
utils.execute('mount', '-t', 'sofs', config, mount_path,
run_as_root=True)
if not os.path.isdir(sysdir):
msg = _("Cannot mount Scality SOFS, check syslog for errors")
LOG.warn(msg)
raise exception.NovaException(msg)
class LibvirtGPFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by gpfs volume."""
def __init__(self, connection):
super(LibvirtGPFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtGPFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "file"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtQuobyteVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for Quobyte."""
def __init__(self, connection):
"""Create back-end to Quobyte."""
super(LibvirtQuobyteVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
conf = super(LibvirtQuobyteVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
conf.source_protocol = quobyte.SOURCE_PROTOCOL
conf.source_type = quobyte.SOURCE_TYPE
conf.driver_cache = quobyte.DRIVER_CACHE
conf.driver_io = quobyte.DRIVER_IO
conf.driver_format = data.get('format', 'raw')
quobyte_volume = self._normalize_url(data['export'])
path = os.path.join(self._get_mount_point_for_share(quobyte_volume),
data['name'])
conf.source_path = path
return conf
@utils.synchronized('connect_volume')
def connect_volume(self, connection_info, disk_info):
"""Connect the volume."""
data = connection_info['data']
quobyte_volume = self._normalize_url(data['export'])
mount_path = self._get_mount_point_for_share(quobyte_volume)
mounted = libvirt_utils.is_mounted(mount_path,
quobyte.SOURCE_PROTOCOL
+ '@' + quobyte_volume)
if mounted:
try:
os.stat(mount_path)
except OSError as exc:
if exc.errno == errno.ENOTCONN:
mounted = False
LOG.info(_LI('Fixing previous mount %s which was not'
' unmounted correctly.'), mount_path)
quobyte.umount_volume(mount_path)
if not mounted:
quobyte.mount_volume(quobyte_volume,
mount_path,
CONF.libvirt.quobyte_client_cfg)
quobyte.validate_volume(mount_path)
@utils.synchronized('connect_volume')
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
quobyte_volume = self._normalize_url(connection_info['data']['export'])
mount_path = self._get_mount_point_for_share(quobyte_volume)
if libvirt_utils.is_mounted(mount_path, 'quobyte@' + quobyte_volume):
quobyte.umount_volume(mount_path)
else:
LOG.info(_LI("Trying to disconnected unmounted volume at %s"),
mount_path)
def _normalize_url(self, export):
protocol = quobyte.SOURCE_PROTOCOL + "://"
if export.startswith(protocol):
export = export[len(protocol):]
return export
def _get_mount_point_for_share(self, quobyte_volume):
"""Return mount point for Quobyte volume.
:param quobyte_volume: Example: storage-host/openstack-volumes
"""
return os.path.join(CONF.libvirt.quobyte_mount_point_base,
utils.get_hash_str(quobyte_volume))
| apache-2.0 | 2,811,650,872,835,121,700 | 42.219711 | 79 | 0.554402 | false |
FOSSRIT/Nova | controllers/extras.py | 1 | 79892 | # Copyright (C) 2008 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# Distributed under the terms of the GNU Lesser General Public License
# http://www.gnu.org/copyleft/lesser.html
from networkx.classes.multigraph import MultiGraph
from networkx.classes.multidigraph import MultiDiGraph
from networkx.exception import NetworkXException, NetworkXError
import networkx.convert as convert
class UbiGraph(MultiGraph):
"""
Base classes for interaction between NetworkX and Ubigraph.
These classes allow drawing with Ubigraph and all of the NetworkX functions.
Examples
--------
(start Ubigraph server)
->>> import networkx
->>> G=nx.UbiGraph()
->>> G.add_edge('a','b',color='#0000ff') # blue edge between 'a' and 'b'
->>> G=nx.UbiGraph(networkx.cycle_graph(5)) # cycle of length 5
See the examples
https://networkx.lanl.gov/browser/networkx/trunk/doc/examples/ubigraph
UbiGraph
--------
NetworkX compatible graph class. Allows self loops and multiple edges.
    Extends the NetworkX MultiGraph class.
    UbiDiGraph
    --------
    NetworkX compatible digraph class. Allows self loops and multiple edges.
    Extends the NetworkX MultiDiGraph class.
Ubigraph attributes
--------------------
In addition to all of the XGraph and XDiGraph methods and NetworkX functions
this class also provides methods to set node and edge attributes and styles.
Node and edge attributes:
->>> G=nx.UbiGraph()
->>> G.add_node('a',shape='torus')
->>> G.add_edge('a','b',style='dashed')
->>> G.set_node_attr('a',color='#0000ff') # node a blue
    ->>> G.set_node_attr(color='#00ffff') # all nodes cyan
Node and edge styles:
->>> G=nx.UbiGraph(nx.cycle_graph(5)) # cycle of length 5
    ->>> redtorus=G.new_node_style(color='#ff0000',shape='torus')
->>> G.set_node_attr(style=redtorus) # all nodes to redtorus style
"""
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True,
nextid=0):
import xmlrpclib
try:
server_url = ubigraph_server
self.server = xmlrpclib.Server(server_url)
self.ubigraph = self.server.ubigraph
if clear:
self.ubigraph.clear()
except:
raise IOError("No Ubigraph server found")
# default node and edge styles
self.ubigraph.set_vertex_style_attribute(0, "color", "#ff0000")
self.ubigraph.set_vertex_style_attribute(0, "shape", "sphere")
self.ubigraph.set_vertex_style_attribute(0, "size", "0.7")
self.ubigraph.set_edge_style_attribute(0, "color", "#ffffff")
self.ubigraph.set_edge_style_attribute(0, "width", "2.0")
self.use_splines=False
self.use_node_labels=False
self.use_edge_labels=False
# keep a mapping from nodes to ubigraph ids
self.nodeid={}
self.nextid=nextid
self.idnode={}
self.adj={} # adjacency list
self.selfloops=selfloops
self.multiedges=multiedges
if data is not None:
self=convert.from_whatever(data,create_using=self)
self.name=name
def add_node(self, n,**kwds):
if n not in self:
MultiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.idnode[self.nextid]=n
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def add_nodes_from(self, nlist,**kwds):
for n in nlist:
self.add_node(n,**kwds)
def delete_node(self,n):
if n in self:
MultiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
id=self.nodeid[n]
del self.nodeid[n]
del self.idnode[id]
def delete_nodes_from(self,nlist):
for n in nlist:
self.delete_node(n)
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e=(u,v,x)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # add x to the end of the list of objects
# that defines the edges between u and v
self.adj[u][v]=self.adj[u].get(v,[])+ [edata]
if u!=v:
self.adj[v][u]=self.adj[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.adj[u][v]=edata
if u!=v:
self.adj[v][u]=edata # a copy would be required to avoid
# modifying both at the same time
# when doing a delete_edge
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def add_edges_from(self, ebunch,**kwds):
for e in ebunch:
self.add_edge(e,**kwds)
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges:
if (self.adj.has_key(u) and self.adj[u].has_key(v)):
x=None
for edata in self.adj[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
# remove the edge item from list
self.adj[u][v].remove(x)
# and if not self loop remove v->u entry
if u!=v:
self.adj[v][u].remove(x)
# if last edge between u and v was deleted, remove all trace
if len(self.adj[u][v])==0:
del self.adj[u][v]
# and if not self loop remove v->u entry
if u!=v:
del self.adj[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_neighbor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.adj[u][v]
if u!=v:
del self.adj[v][u]
def delete_edges_from(self, ebunch):
for e in ebunch:
self.delete_edge(e)
def clear(self):
if len(self)>0:
MultiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
# node and edge attrs
def set_node_attr(self,nbunch=None,style=None,**kwds):
bunch=self.nbunch_iter(nbunch)
for n in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
else:
self.ubigraph.change_vertex_style(self.nodeid[n],style)
def set_edge_attr(self,ebunch=None,style=None,**kwds):
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
for (u,v,d) in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(d['id'],k,v)
else:
ret=self.ubigraph.change_edge_style(d['id'],style)
# node and edge styles
def new_node_style(self,style=0,**kwds):
style=self.ubigraph.new_vertex_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_vertex_style_attribute(style,k,v)
return style
def new_edge_style(self,style=0,**kwds):
style=self.ubigraph.new_edge_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_edge_style_attribute(style,k,v)
return style
# ubigraph helper methods
# an interface to the internal ubigraph methods that do this
# would make this simpler
def splines(self):
"""Toggle spline edges.
"""
if self.use_splines==True:
self.set_edge_attr(spline='false')
self.use_splines=False
else:
self.set_edge_attr(spline='true')
self.use_splines=True
def node_labels(self,nbunch=None,labels=None):
"""Toggle node labels.
"""
bunch=list(self.nbunch_iter(nbunch))
if self.use_node_labels==True:
labels=dict(zip(bunch,['']*len(bunch)))
self.use_node_labels=False
else:
if labels is None:
labels=dict(zip(bunch,bunch))
self.use_node_labels=True
for n,label in labels.items():
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(label))
def edge_labels(self,ebunch=None,labels=None):
"""Toggle edge labels.
"""
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
if self.use_edge_labels==True:
labels=dict([(d['id'],'') for u,v,d in bunch])
self.use_edge_labels=False
else:
if labels is None:
labels=dict([(d['id'],str(d['data'])) for u,v,d in bunch if d['data'] is not None])
self.use_edge_labels=True
for eid,label in labels.items():
self.ubigraph.set_edge_attribute(eid,'label',label)
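    # Editor's note (usage sketch; assumes networkx imported as nx and a
    # Ubigraph server running on the default port):
    #   G = UbiGraph(nx.path_graph(3))
    #   G.node_labels()    # show node labels; call again to hide them
    #   G.splines()        # draw edges as splines; call again to revert
    #   G.edge_labels()    # toggle edge data labels on and off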
class UbiDiGraph(UbiGraph,MultiDiGraph):
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True):
self.pred={} # predecessor
self.succ={}
UbiGraph.__init__(self,
data=data,name=name,
selfloops=selfloops,
multiedges=multiedges,
ubigraph_server=ubigraph_server,
clear=clear)
self.ubigraph.set_edge_style_attribute(0, "arrow", "true")
self.adj=self.succ # successor is same as adj for digraph
def add_node(self, n,**kwds):
if n not in self:
MultiDiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def delete_node(self,n):
if n in self:
MultiDiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e a tuple
if len(u)==3: #case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v,x):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # append x to the end of the list of objects
# that defines the edges between u and v
self.succ[u][v]=self.succ[u].get(v,[])+ [edata]
self.pred[v][u]=self.pred[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.succ[u][v]=edata
self.pred[v][u]=edata # note that the same object is referred to
# from both succ and pred
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges: # multiedges are stored as a list
if (self.succ.has_key(u) and self.succ[u].has_key(v)):
x=None
for edata in self.succ[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
self.succ[u][v].remove(x) # remove the edge item from list
self.pred[v][u].remove(x)
if len(self.succ[u][v])==0: # if last edge between u and v
del self.succ[u][v] # was deleted, remove all trace
del self.pred[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_successor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.succ[u][v]
del self.pred[v][u]
return
def clear(self):
if len(self)>0:
MultiDiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2008 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# Distributed under the terms of the GNU Lesser General Public License
# http://www.gnu.org/copyleft/lesser.html
from networkx.classes.multigraph import MultiGraph
from networkx.classes.multidigraph import MultiDiGraph
from networkx.exception import NetworkXException, NetworkXError
import networkx.convert as convert
class UbiGraph(MultiGraph):
"""
Base classes for interaction between NetworkX and Ubigraph.
These classes allow drawing with Ubigraph and all of the NetworkX functions.
Examples
--------
(start Ubigraph server)
->>> import networkx
->>> G=nx.UbiGraph()
->>> G.add_edge('a','b',color='#0000ff') # blue edge between 'a' and 'b'
->>> G=nx.UbiGraph(networkx.cycle_graph(5)) # cycle of length 5
See the examples
https://networkx.lanl.gov/browser/networkx/trunk/doc/examples/ubigraph
UbiGraph
--------
NetworkX compatible graph class. Allows self loops and multiple edges.
    Extends the NetworkX MultiGraph class.
    UbiDiGraph
    --------
    NetworkX compatible digraph class. Allows self loops and multiple edges.
    Extends the NetworkX MultiDiGraph class.
Ubigraph attributes
--------------------
In addition to all of the XGraph and XDiGraph methods and NetworkX functions
this class also provides methods to set node and edge attributes and styles.
Node and edge attributes:
->>> G=nx.UbiGraph()
->>> G.add_node('a',shape='torus')
->>> G.add_edge('a','b',style='dashed')
->>> G.set_node_attr('a',color='#0000ff') # node a blue
->>> G.set_node_attr(color='#00ffff') # all nodes green
Node and edge styles:
->>> G=nx.UbiGraph(nx.cycle_graph(5)) # cycle of length 5
->>> redtorus=G.new_node_style(color="#ff0000',shape='torus')
->>> G.set_node_attr(style=redtorus) # all nodes to redtorus style
"""
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True,
nextid=0):
import xmlrpclib
try:
server_url = ubigraph_server
self.server = xmlrpclib.Server(server_url)
self.ubigraph = self.server.ubigraph
if clear:
self.ubigraph.clear()
except:
raise IOError("No Ubigraph server found")
# default node and edge styles
self.ubigraph.set_vertex_style_attribute(0, "color", "#ff0000")
self.ubigraph.set_vertex_style_attribute(0, "shape", "sphere")
self.ubigraph.set_vertex_style_attribute(0, "size", "0.7")
self.ubigraph.set_edge_style_attribute(0, "color", "#ffffff")
self.ubigraph.set_edge_style_attribute(0, "width", "2.0")
self.use_splines=False
self.use_node_labels=False
self.use_edge_labels=False
# keep a mapping from nodes to ubigraph ids
self.nodeid={}
self.nextid=nextid
self.idnode={}
self.adj={} # adjacency list
self.selfloops=selfloops
self.multiedges=multiedges
if data is not None:
self=convert.from_whatever(data,create_using=self)
self.name=name
def add_node(self, n,**kwds):
if n not in self:
MultiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.idnode[self.nextid]=n
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def add_nodes_from(self, nlist,**kwds):
for n in nlist:
self.add_node(n,**kwds)
def delete_node(self,n):
if n in self:
MultiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
id=self.nodeid[n]
del self.nodeid[n]
del self.idnode[id]
def delete_nodes_from(self,nlist):
for n in nlist:
self.delete_node(n)
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e=(u,v,x)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # add x to the end of the list of objects
# that defines the edges between u and v
self.adj[u][v]=self.adj[u].get(v,[])+ [edata]
if u!=v:
self.adj[v][u]=self.adj[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.adj[u][v]=edata
if u!=v:
self.adj[v][u]=edata # a copy would be required to avoid
# modifying both at the same time
# when doing a delete_edge
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def add_edges_from(self, ebunch,**kwds):
for e in ebunch:
self.add_edge(e,**kwds)
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges:
if (self.adj.has_key(u) and self.adj[u].has_key(v)):
x=None
for edata in self.adj[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
# remove the edge item from list
self.adj[u][v].remove(x)
# and if not self loop remove v->u entry
if u!=v:
self.adj[v][u].remove(x)
# if last edge between u and v was deleted, remove all trace
if len(self.adj[u][v])==0:
del self.adj[u][v]
# and if not self loop remove v->u entry
if u!=v:
del self.adj[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_neighbor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.adj[u][v]
if u!=v:
del self.adj[v][u]
def delete_edges_from(self, ebunch):
for e in ebunch:
self.delete_edge(e)
def clear(self):
if len(self)>0:
MultiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
# node and edge attrs
def set_node_attr(self,nbunch=None,style=None,**kwds):
bunch=self.nbunch_iter(nbunch)
for n in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
else:
self.ubigraph.change_vertex_style(self.nodeid[n],style)
def set_edge_attr(self,ebunch=None,style=None,**kwds):
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
for (u,v,d) in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(d['id'],k,v)
else:
ret=self.ubigraph.change_edge_style(d['id'],style)
# node and edge styles
def new_node_style(self,style=0,**kwds):
style=self.ubigraph.new_vertex_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_vertex_style_attribute(style,k,v)
return style
def new_edge_style(self,style=0,**kwds):
style=self.ubigraph.new_edge_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_edge_style_attribute(style,k,v)
return style
# ubigraph helper methods
# an interface to the internal ubigraph methods that do this
# would make this simpler
def splines(self):
"""Toggle spline edges.
"""
if self.use_splines==True:
self.set_edge_attr(spline='false')
self.use_splines=False
else:
self.set_edge_attr(spline='true')
self.use_splines=True
def node_labels(self,nbunch=None,labels=None):
"""Toggle node labels.
"""
bunch=list(self.nbunch_iter(nbunch))
if self.use_node_labels==True:
labels=dict(zip(bunch,['']*len(bunch)))
self.use_node_labels=False
else:
if labels is None:
labels=dict(zip(bunch,bunch))
self.use_node_labels=True
for n,label in labels.items():
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(label))
def edge_labels(self,ebunch=None,labels=None):
"""Toggle edge labels.
"""
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
if self.use_edge_labels==True:
labels=dict([(d['id'],'') for u,v,d in bunch])
self.use_edge_labels=False
else:
if labels is None:
labels=dict([(d['id'],str(d['data'])) for u,v,d in bunch if d['data'] is not None])
self.use_edge_labels=True
for eid,label in labels.items():
self.ubigraph.set_edge_attribute(eid,'label',label)
class UbiDiGraph(UbiGraph,MultiDiGraph):
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True):
self.pred={} # predecessor
self.succ={}
UbiGraph.__init__(self,
data=data,name=name,
selfloops=selfloops,
multiedges=multiedges,
ubigraph_server=ubigraph_server,
clear=clear)
self.ubigraph.set_edge_style_attribute(0, "arrow", "true")
self.adj=self.succ # successor is same as adj for digraph
def add_node(self, n,**kwds):
if n not in self:
MultiDiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def delete_node(self,n):
if n in self:
MultiDiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e a tuple
if len(u)==3: #case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v,x):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # append x to the end of the list of objects
# that defines the edges between u and v
self.succ[u][v]=self.succ[u].get(v,[])+ [edata]
self.pred[v][u]=self.pred[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.succ[u][v]=edata
self.pred[v][u]=edata # note that the same object is referred to
# from both succ and pred
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges: # multiedges are stored as a list
if (self.succ.has_key(u) and self.succ[u].has_key(v)):
x=None
for edata in self.succ[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
self.succ[u][v].remove(x) # remove the edge item from list
self.pred[v][u].remove(x)
if len(self.succ[u][v])==0: # if last edge between u and v
del self.succ[u][v] # was deleted, remove all trace
del self.pred[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_successor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.succ[u][v]
del self.pred[v][u]
return
def clear(self):
if len(self)>0:
MultiDiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2008 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# Distributed under the terms of the GNU Lesser General Public License
# http://www.gnu.org/copyleft/lesser.html
from networkx.classes.multigraph import MultiGraph
from networkx.classes.multidigraph import MultiDiGraph
from networkx.exception import NetworkXException, NetworkXError
import networkx.convert as convert
class UbiGraph(MultiGraph):
"""
Base classes for interaction between NetworkX and Ubigraph.
These classes allow drawing with Ubigraph and all of the NetworkX functions.
Examples
--------
(start Ubigraph server)
->>> import networkx
->>> G=nx.UbiGraph()
->>> G.add_edge('a','b',color='#0000ff') # blue edge between 'a' and 'b'
->>> G=nx.UbiGraph(networkx.cycle_graph(5)) # cycle of length 5
See the examples
https://networkx.lanl.gov/browser/networkx/trunk/doc/examples/ubigraph
UbiGraph
--------
NetworkX compatible graph class. Allows self loops and multiple edges.
Extends to NetworkX MultiGraph class.
UbiDiGraph
--------
NetworkX compatible digraph class. Allows self loops and multiple edges.
Extends NetworkX MultiDiGraph class.
Ubigraph attributes
--------------------
In addition to all of the XGraph and XDiGraph methods and NetworkX functions
this class also provides methods to set node and edge attributes and styles.
Node and edge attributes:
->>> G=nx.UbiGraph()
->>> G.add_node('a',shape='torus')
->>> G.add_edge('a','b',style='dashed')
->>> G.set_node_attr('a',color='#0000ff') # node a blue
->>> G.set_node_attr(color='#00ffff') # all nodes green
Node and edge styles:
->>> G=nx.UbiGraph(nx.cycle_graph(5)) # cycle of length 5
->>> redtorus=G.new_node_style(color="#ff0000',shape='torus')
->>> G.set_node_attr(style=redtorus) # all nodes to redtorus style
"""
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True,
nextid=0):
import xmlrpclib
try:
server_url = ubigraph_server
self.server = xmlrpclib.Server(server_url)
self.ubigraph = self.server.ubigraph
if clear:
self.ubigraph.clear()
except:
raise IOError("No Ubigraph server found")
# default node and edge styles
self.ubigraph.set_vertex_style_attribute(0, "color", "#ff0000")
self.ubigraph.set_vertex_style_attribute(0, "shape", "sphere")
self.ubigraph.set_vertex_style_attribute(0, "size", "0.7")
self.ubigraph.set_edge_style_attribute(0, "color", "#ffffff")
self.ubigraph.set_edge_style_attribute(0, "width", "2.0")
self.use_splines=False
self.use_node_labels=False
self.use_edge_labels=False
# keep a mapping from nodes to ubigraph ids
self.nodeid={}
self.nextid=nextid
self.idnode={}
self.adj={} # adjacency list
self.selfloops=selfloops
self.multiedges=multiedges
if data is not None:
self=convert.from_whatever(data,create_using=self)
self.name=name
def add_node(self, n,**kwds):
if n not in self:
MultiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.idnode[self.nextid]=n
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def add_nodes_from(self, nlist,**kwds):
for n in nlist:
self.add_node(n,**kwds)
def delete_node(self,n):
if n in self:
MultiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
id=self.nodeid[n]
del self.nodeid[n]
del self.idnode[id]
def delete_nodes_from(self,nlist):
for n in nlist:
self.delete_node(n)
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e=(u,v,x)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # add x to the end of the list of objects
# that defines the edges between u and v
self.adj[u][v]=self.adj[u].get(v,[])+ [edata]
if u!=v:
self.adj[v][u]=self.adj[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.adj[u][v]=edata
if u!=v:
self.adj[v][u]=edata # a copy would be required to avoid
# modifying both at the same time
# when doing a delete_edge
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def add_edges_from(self, ebunch,**kwds):
for e in ebunch:
self.add_edge(e,**kwds)
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges:
if (self.adj.has_key(u) and self.adj[u].has_key(v)):
x=None
for edata in self.adj[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
# remove the edge item from list
self.adj[u][v].remove(x)
# and if not self loop remove v->u entry
if u!=v:
self.adj[v][u].remove(x)
# if last edge between u and v was deleted, remove all trace
if len(self.adj[u][v])==0:
del self.adj[u][v]
# and if not self loop remove v->u entry
if u!=v:
del self.adj[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_neighbor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.adj[u][v]
if u!=v:
del self.adj[v][u]
def delete_edges_from(self, ebunch):
for e in ebunch:
self.delete_edge(e)
def clear(self):
if len(self)>0:
MultiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
# node and edge attrs
def set_node_attr(self,nbunch=None,style=None,**kwds):
bunch=self.nbunch_iter(nbunch)
for n in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
else:
self.ubigraph.change_vertex_style(self.nodeid[n],style)
def set_edge_attr(self,ebunch=None,style=None,**kwds):
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
for (u,v,d) in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(d['id'],k,v)
else:
ret=self.ubigraph.change_edge_style(d['id'],style)
# node and edge styles
def new_node_style(self,style=0,**kwds):
style=self.ubigraph.new_vertex_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_vertex_style_attribute(style,k,v)
return style
def new_edge_style(self,style=0,**kwds):
style=self.ubigraph.new_edge_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_edge_style_attribute(style,k,v)
return style
# ubigraph helper methods
# an interface to the internal ubigraph methods that do this
# would make this simpler
def splines(self):
"""Toggle spline edges.
"""
if self.use_splines==True:
self.set_edge_attr(spline='false')
self.use_splines=False
else:
self.set_edge_attr(spline='true')
self.use_splines=True
def node_labels(self,nbunch=None,labels=None):
"""Toggle node labels.
"""
bunch=list(self.nbunch_iter(nbunch))
if self.use_node_labels==True:
labels=dict(zip(bunch,['']*len(bunch)))
self.use_node_labels=False
else:
if labels is None:
labels=dict(zip(bunch,bunch))
self.use_node_labels=True
for n,label in labels.items():
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(label))
def edge_labels(self,ebunch=None,labels=None):
"""Toggle edge labels.
"""
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
if self.use_edge_labels==True:
labels=dict([(d['id'],'') for u,v,d in bunch])
self.use_edge_labels=False
else:
if labels is None:
labels=dict([(d['id'],str(d['data'])) for u,v,d in bunch if d['data'] is not None])
self.use_edge_labels=True
for eid,label in labels.items():
self.ubigraph.set_edge_attribute(eid,'label',label)
class UbiDiGraph(UbiGraph,MultiDiGraph):
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True):
self.pred={} # predecessor
self.succ={}
UbiGraph.__init__(self,
data=data,name=name,
selfloops=selfloops,
multiedges=multiedges,
ubigraph_server=ubigraph_server,
clear=clear)
self.ubigraph.set_edge_style_attribute(0, "arrow", "true")
self.adj=self.succ # successor is same as adj for digraph
def add_node(self, n,**kwds):
if n not in self:
MultiDiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def delete_node(self,n):
if n in self:
MultiDiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e a tuple
if len(u)==3: #case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v,x):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # append x to the end of the list of objects
# that defines the edges between u and v
self.succ[u][v]=self.succ[u].get(v,[])+ [edata]
self.pred[v][u]=self.pred[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.succ[u][v]=edata
self.pred[v][u]=edata # note that the same object is referred to
# from both succ and pred
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges: # multiedges are stored as a list
if (self.succ.has_key(u) and self.succ[u].has_key(v)):
x=None
for edata in self.succ[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
self.succ[u][v].remove(x) # remove the edge item from list
self.pred[v][u].remove(x)
if len(self.succ[u][v])==0: # if last edge between u and v
del self.succ[u][v] # was deleted, remove all trace
del self.pred[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_successor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.succ[u][v]
del self.pred[v][u]
return
def clear(self):
if len(self)>0:
MultiDiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2008 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# Distributed under the terms of the GNU Lesser General Public License
# http://www.gnu.org/copyleft/lesser.html
from networkx.classes.multigraph import MultiGraph
from networkx.classes.multidigraph import MultiDiGraph
from networkx.exception import NetworkXException, NetworkXError
import networkx.convert as convert
class UbiGraph(MultiGraph):
"""
Base classes for interaction between NetworkX and Ubigraph.
These classes allow drawing with Ubigraph and all of the NetworkX functions.
Examples
--------
(start Ubigraph server)
->>> import networkx
->>> G=nx.UbiGraph()
->>> G.add_edge('a','b',color='#0000ff') # blue edge between 'a' and 'b'
->>> G=nx.UbiGraph(networkx.cycle_graph(5)) # cycle of length 5
See the examples
https://networkx.lanl.gov/browser/networkx/trunk/doc/examples/ubigraph
UbiGraph
--------
NetworkX compatible graph class. Allows self loops and multiple edges.
Extends to NetworkX MultiGraph class.
UbiDiGraph
--------
NetworkX compatible digraph class. Allows self loops and multiple edges.
Extends NetworkX MultiDiGraph class.
Ubigraph attributes
--------------------
In addition to all of the XGraph and XDiGraph methods and NetworkX functions
this class also provides methods to set node and edge attributes and styles.
Node and edge attributes:
->>> G=nx.UbiGraph()
->>> G.add_node('a',shape='torus')
->>> G.add_edge('a','b',style='dashed')
->>> G.set_node_attr('a',color='#0000ff') # node a blue
->>> G.set_node_attr(color='#00ffff') # all nodes green
Node and edge styles:
->>> G=nx.UbiGraph(nx.cycle_graph(5)) # cycle of length 5
->>> redtorus=G.new_node_style(color="#ff0000',shape='torus')
->>> G.set_node_attr(style=redtorus) # all nodes to redtorus style
"""
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True,
nextid=0):
import xmlrpclib
try:
server_url = ubigraph_server
self.server = xmlrpclib.Server(server_url)
self.ubigraph = self.server.ubigraph
if clear:
self.ubigraph.clear()
except:
raise IOError("No Ubigraph server found")
# default node and edge styles
self.ubigraph.set_vertex_style_attribute(0, "color", "#ff0000")
self.ubigraph.set_vertex_style_attribute(0, "shape", "sphere")
self.ubigraph.set_vertex_style_attribute(0, "size", "0.7")
self.ubigraph.set_edge_style_attribute(0, "color", "#ffffff")
self.ubigraph.set_edge_style_attribute(0, "width", "2.0")
self.use_splines=False
self.use_node_labels=False
self.use_edge_labels=False
# keep a mapping from nodes to ubigraph ids
self.nodeid={}
self.nextid=nextid
self.idnode={}
self.adj={} # adjacency list
self.selfloops=selfloops
self.multiedges=multiedges
if data is not None:
self=convert.from_whatever(data,create_using=self)
self.name=name
def add_node(self, n,**kwds):
if n not in self:
MultiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.idnode[self.nextid]=n
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def add_nodes_from(self, nlist,**kwds):
for n in nlist:
self.add_node(n,**kwds)
def delete_node(self,n):
if n in self:
MultiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
id=self.nodeid[n]
del self.nodeid[n]
del self.idnode[id]
def delete_nodes_from(self,nlist):
for n in nlist:
self.delete_node(n)
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e=(u,v,x)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # add x to the end of the list of objects
# that defines the edges between u and v
self.adj[u][v]=self.adj[u].get(v,[])+ [edata]
if u!=v:
self.adj[v][u]=self.adj[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.adj[u][v]=edata
if u!=v:
self.adj[v][u]=edata # a copy would be required to avoid
# modifying both at the same time
# when doing a delete_edge
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def add_edges_from(self, ebunch,**kwds):
for e in ebunch:
self.add_edge(e,**kwds)
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges:
if (self.adj.has_key(u) and self.adj[u].has_key(v)):
x=None
for edata in self.adj[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
# remove the edge item from list
self.adj[u][v].remove(x)
# and if not self loop remove v->u entry
if u!=v:
self.adj[v][u].remove(x)
# if last edge between u and v was deleted, remove all trace
if len(self.adj[u][v])==0:
del self.adj[u][v]
# and if not self loop remove v->u entry
if u!=v:
del self.adj[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_neighbor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.adj[u][v]
if u!=v:
del self.adj[v][u]
def delete_edges_from(self, ebunch):
for e in ebunch:
self.delete_edge(e)
def clear(self):
if len(self)>0:
MultiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
# node and edge attrs
def set_node_attr(self,nbunch=None,style=None,**kwds):
bunch=self.nbunch_iter(nbunch)
for n in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
else:
self.ubigraph.change_vertex_style(self.nodeid[n],style)
def set_edge_attr(self,ebunch=None,style=None,**kwds):
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
for (u,v,d) in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(d['id'],k,v)
else:
ret=self.ubigraph.change_edge_style(d['id'],style)
# node and edge styles
def new_node_style(self,style=0,**kwds):
style=self.ubigraph.new_vertex_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_vertex_style_attribute(style,k,v)
return style
def new_edge_style(self,style=0,**kwds):
style=self.ubigraph.new_edge_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_edge_style_attribute(style,k,v)
return style
# ubigraph helper methods
# an interface to the internal ubigraph methods that do this
# would make this simpler
def splines(self):
"""Toggle spline edges.
"""
if self.use_splines==True:
self.set_edge_attr(spline='false')
self.use_splines=False
else:
self.set_edge_attr(spline='true')
self.use_splines=True
def node_labels(self,nbunch=None,labels=None):
"""Toggle node labels.
"""
bunch=list(self.nbunch_iter(nbunch))
if self.use_node_labels==True:
labels=dict(zip(bunch,['']*len(bunch)))
self.use_node_labels=False
else:
if labels is None:
labels=dict(zip(bunch,bunch))
self.use_node_labels=True
for n,label in labels.items():
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(label))
def edge_labels(self,ebunch=None,labels=None):
"""Toggle edge labels.
"""
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
if self.use_edge_labels==True:
labels=dict([(d['id'],'') for u,v,d in bunch])
self.use_edge_labels=False
else:
if labels is None:
labels=dict([(d['id'],str(d['data'])) for u,v,d in bunch if d['data'] is not None])
self.use_edge_labels=True
for eid,label in labels.items():
self.ubigraph.set_edge_attribute(eid,'label',label)
class UbiDiGraph(UbiGraph,MultiDiGraph):
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True):
self.pred={} # predecessor
self.succ={}
UbiGraph.__init__(self,
data=data,name=name,
selfloops=selfloops,
multiedges=multiedges,
ubigraph_server=ubigraph_server,
clear=clear)
self.ubigraph.set_edge_style_attribute(0, "arrow", "true")
self.adj=self.succ # successor is same as adj for digraph
def add_node(self, n,**kwds):
if n not in self:
MultiDiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def delete_node(self,n):
if n in self:
MultiDiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e a tuple
if len(u)==3: #case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v,x):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # append x to the end of the list of objects
# that defines the edges between u and v
self.succ[u][v]=self.succ[u].get(v,[])+ [edata]
self.pred[v][u]=self.pred[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.succ[u][v]=edata
self.pred[v][u]=edata # note that the same object is referred to
# from both succ and pred
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges: # multiedges are stored as a list
if (self.succ.has_key(u) and self.succ[u].has_key(v)):
x=None
for edata in self.succ[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
self.succ[u][v].remove(x) # remove the edge item from list
self.pred[v][u].remove(x)
if len(self.succ[u][v])==0: # if last edge between u and v
del self.succ[u][v] # was deleted, remove all trace
del self.pred[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_successor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.succ[u][v]
del self.pred[v][u]
return
def clear(self):
if len(self)>0:
MultiDiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2008 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# Distributed under the terms of the GNU Lesser General Public License
# http://www.gnu.org/copyleft/lesser.html
from networkx.classes.multigraph import MultiGraph
from networkx.classes.multidigraph import MultiDiGraph
from networkx.exception import NetworkXException, NetworkXError
import networkx.convert as convert
class UbiGraph(MultiGraph):
"""
Base classes for interaction between NetworkX and Ubigraph.
These classes allow drawing with Ubigraph and all of the NetworkX functions.
Examples
--------
(start Ubigraph server)
->>> import networkx
->>> G=nx.UbiGraph()
->>> G.add_edge('a','b',color='#0000ff') # blue edge between 'a' and 'b'
->>> G=nx.UbiGraph(networkx.cycle_graph(5)) # cycle of length 5
See the examples
https://networkx.lanl.gov/browser/networkx/trunk/doc/examples/ubigraph
UbiGraph
--------
NetworkX compatible graph class. Allows self loops and multiple edges.
Extends to NetworkX MultiGraph class.
UbiDiGraph
--------
NetworkX compatible digraph class. Allows self loops and multiple edges.
Extends NetworkX MultiDiGraph class.
Ubigraph attributes
--------------------
In addition to all of the XGraph and XDiGraph methods and NetworkX functions
this class also provides methods to set node and edge attributes and styles.
Node and edge attributes:
->>> G=nx.UbiGraph()
->>> G.add_node('a',shape='torus')
->>> G.add_edge('a','b',style='dashed')
->>> G.set_node_attr('a',color='#0000ff') # node a blue
->>> G.set_node_attr(color='#00ffff') # all nodes green
Node and edge styles:
->>> G=nx.UbiGraph(nx.cycle_graph(5)) # cycle of length 5
->>> redtorus=G.new_node_style(color="#ff0000',shape='torus')
->>> G.set_node_attr(style=redtorus) # all nodes to redtorus style
"""
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True,
nextid=0):
import xmlrpclib
try:
server_url = ubigraph_server
self.server = xmlrpclib.Server(server_url)
self.ubigraph = self.server.ubigraph
if clear:
self.ubigraph.clear()
except:
raise IOError("No Ubigraph server found")
# default node and edge styles
self.ubigraph.set_vertex_style_attribute(0, "color", "#ff0000")
self.ubigraph.set_vertex_style_attribute(0, "shape", "sphere")
self.ubigraph.set_vertex_style_attribute(0, "size", "0.7")
self.ubigraph.set_edge_style_attribute(0, "color", "#ffffff")
self.ubigraph.set_edge_style_attribute(0, "width", "2.0")
self.use_splines=False
self.use_node_labels=False
self.use_edge_labels=False
# keep a mapping from nodes to ubigraph ids
self.nodeid={}
self.nextid=nextid
self.idnode={}
self.adj={} # adjacency list
self.selfloops=selfloops
self.multiedges=multiedges
if data is not None:
self=convert.from_whatever(data,create_using=self)
self.name=name
def add_node(self, n,**kwds):
if n not in self:
MultiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.idnode[self.nextid]=n
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def add_nodes_from(self, nlist,**kwds):
for n in nlist:
self.add_node(n,**kwds)
def delete_node(self,n):
if n in self:
MultiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
id=self.nodeid[n]
del self.nodeid[n]
del self.idnode[id]
def delete_nodes_from(self,nlist):
for n in nlist:
self.delete_node(n)
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e=(u,v,x)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # add x to the end of the list of objects
# that defines the edges between u and v
self.adj[u][v]=self.adj[u].get(v,[])+ [edata]
if u!=v:
self.adj[v][u]=self.adj[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.adj[u][v]=edata
if u!=v:
self.adj[v][u]=edata # a copy would be required to avoid
# modifying both at the same time
# when doing a delete_edge
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def add_edges_from(self, ebunch,**kwds):
for e in ebunch:
self.add_edge(e,**kwds)
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges:
if (self.adj.has_key(u) and self.adj[u].has_key(v)):
x=None
for edata in self.adj[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
# remove the edge item from list
self.adj[u][v].remove(x)
# and if not self loop remove v->u entry
if u!=v:
self.adj[v][u].remove(x)
# if last edge between u and v was deleted, remove all trace
if len(self.adj[u][v])==0:
del self.adj[u][v]
# and if not self loop remove v->u entry
if u!=v:
del self.adj[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_neighbor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.adj[u][v]
if u!=v:
del self.adj[v][u]
def delete_edges_from(self, ebunch):
for e in ebunch:
self.delete_edge(e)
def clear(self):
if len(self)>0:
MultiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
# node and edge attrs
def set_node_attr(self,nbunch=None,style=None,**kwds):
bunch=self.nbunch_iter(nbunch)
for n in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
else:
self.ubigraph.change_vertex_style(self.nodeid[n],style)
def set_edge_attr(self,ebunch=None,style=None,**kwds):
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
for (u,v,d) in bunch:
if style is None:
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(d['id'],k,v)
else:
ret=self.ubigraph.change_edge_style(d['id'],style)
# node and edge styles
def new_node_style(self,style=0,**kwds):
style=self.ubigraph.new_vertex_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_vertex_style_attribute(style,k,v)
return style
def new_edge_style(self,style=0,**kwds):
style=self.ubigraph.new_edge_style(style)
for (k,v) in kwds.items():
self.ubigraph.set_edge_style_attribute(style,k,v)
return style
# ubigraph helper methods
# an interface to the internal ubigraph methods that do this
# would make this simpler
def splines(self):
"""Toggle spline edges.
"""
if self.use_splines==True:
self.set_edge_attr(spline='false')
self.use_splines=False
else:
self.set_edge_attr(spline='true')
self.use_splines=True
def node_labels(self,nbunch=None,labels=None):
"""Toggle node labels.
"""
bunch=list(self.nbunch_iter(nbunch))
if self.use_node_labels==True:
labels=dict(zip(bunch,['']*len(bunch)))
self.use_node_labels=False
else:
if labels is None:
labels=dict(zip(bunch,bunch))
self.use_node_labels=True
for n,label in labels.items():
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(label))
def edge_labels(self,ebunch=None,labels=None):
"""Toggle edge labels.
"""
if ebunch is None:
bunch=self.edges(data=True)
else:
try:
self.has_edge(ebunch)
bunch=[ebunch]
except:
bunch=list(ebunch)
if self.use_edge_labels==True:
labels=dict([(d['id'],'') for u,v,d in bunch])
self.use_edge_labels=False
else:
if labels is None:
labels=dict([(d['id'],str(d['data'])) for u,v,d in bunch if d['data'] is not None])
self.use_edge_labels=True
for eid,label in labels.items():
self.ubigraph.set_edge_attribute(eid,'label',label)
class UbiDiGraph(UbiGraph,MultiDiGraph):
def __init__(self, data=None, name='',
selfloops=False,
multiedges=False,
ubigraph_server= 'http://127.0.0.1:20738/RPC2',
clear=True):
self.pred={} # predecessor
self.succ={}
UbiGraph.__init__(self,
data=data,name=name,
selfloops=selfloops,
multiedges=multiedges,
ubigraph_server=ubigraph_server,
clear=clear)
self.ubigraph.set_edge_style_attribute(0, "arrow", "true")
self.adj=self.succ # successor is same as adj for digraph
def add_node(self, n,**kwds):
if n not in self:
MultiDiGraph.add_node(self,n)
self.nodeid[n]=self.nextid
self.nextid+=1
self.ubigraph.new_vertex_w_id(self.nodeid[n])
# add ubigraph attributes
for (k,v) in kwds.items():
ret=self.ubigraph.set_vertex_attribute(self.nodeid[n],k,v)
# support toggling node labels
if self.use_node_labels:
self.ubigraph.set_vertex_attribute(self.nodeid[n],'label',str(n))
def delete_node(self,n):
if n in self:
MultiDiGraph.delete_node(self,n)
self.ubigraph.remove_vertex(self.nodeid[n])
def add_edge(self, u, v=None, x=None, **kwds):
if v is None: # add_edge was called as add_edge(e), with e a tuple
if len(u)==3: #case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v)
u,v=u # x=None
# if edge exists, quietly return if multiple edges are not allowed
if not self.multiedges and self.has_edge(u,v,x):
return
# add nodes
self.add_node(u)
self.add_node(v)
# self loop? quietly return if not allowed
if not self.selfloops and u==v:
return
# create ubigraph edge
# build dictionary with edge id and user data to use as edge data
e=self.ubigraph.new_edge(self.nodeid[u],self.nodeid[v])
edata={'id':e,'data':x}
if self.multiedges: # append x to the end of the list of objects
# that defines the edges between u and v
self.succ[u][v]=self.succ[u].get(v,[])+ [edata]
self.pred[v][u]=self.pred[v].get(u,[])+ [edata]
else: # x is the new object assigned to single edge between u and v
self.succ[u][v]=edata
self.pred[v][u]=edata # note that the same object is referred to
# from both succ and pred
for (k,v) in kwds.items():
ret=self.ubigraph.set_edge_attribute(e,k,v)
# support toggling edge labels
if self.use_edge_labels:
self.ubigraph.set_edge_attribute(e,'label',str(x))
def delete_edge(self, u, v=None, x=None):
if v is None: # was called as delete_edge(e)
if len(u)==3: # case e=(u,v,x)
u,v,x=u
else: # assume e=(u,v), x unspecified, set to None
u,v=u # x=None
try:
xdata=x['data']
except:
xdata=x
if self.multiedges: # multiedges are stored as a list
if (self.succ.has_key(u) and self.succ[u].has_key(v)):
x=None
for edata in self.succ[u][v]:
if xdata == edata['data']:
x=edata # (u,v,edata) is an edge
eid=edata['id']
if x is None:
return # no edge
self.succ[u][v].remove(x) # remove the edge item from list
self.pred[v][u].remove(x)
if len(self.succ[u][v])==0: # if last edge between u and v
del self.succ[u][v] # was deleted, remove all trace
del self.pred[v][u]
self.ubigraph.remove_edge(eid)
else: # delete single edge
if self.has_successor(u,v):
eid=self.get_edge(u,v)['id']
self.ubigraph.remove_edge(eid)
del self.succ[u][v]
del self.pred[v][u]
return
def clear(self):
if len(self)>0:
MultiDiGraph.clear(self)
self.ubigraph.clear()
self.nodeid={}
self.nextid=0
import networkx as nx
TYPE_COLORS = ["#ff0000", "#ffff00", "#00ff00", "#ffffff", "#ffffff", "#ff0000"]
TYPE_SHAPES = ["octahedron", "sphere", "icosahedron"]
class graph:
def __init__(self):
self.__graph = nx.Graph()
self.__node_updated = []
def connect_ubigraph(self, server=None):
try:
self.__graph = nx.UbiGraph(self.__graph, ubigraph_server=server)
except:
print """
It looks like you are using a version of networkx that has removed
support for ubigraph. I will attempt to load a copy of the old
class.
"""
self.__graph = UbiGraph(self.__graph, ubigraph_server=server)
self.__graph.node_labels()
def add_edit(self, page, id, name):
# Add Page
self.__graph.add_node( page,color=TYPE_COLORS[id], shape=TYPE_SHAPES[0] )
self.__graph.set_node_attr( page, label=name )
def add_edge( self, a, b ):
self.__graph.add_edge(a, b)
def run_graph():
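    # NOTE (added comment): assumes a web2py-style DAL named `db`, exposing
    # `node` and `linkTable` tables, is already in scope; it is not defined in this file.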
the_graph = graph()
the_graph.connect_ubigraph( "http://localhost:20738/RPC2" )
#populate Nodes
for node in db(db.node.id > 0).select():
the_graph.add_edit( node.url, node.type.id - 1, node.name )
for link in db(db.linkTable.id > 0).select():
the_graph.add_edge(link.nodeId.url, link.linkId.url)
| gpl-3.0 | -6,707,146,173,462,512,000 | 33.615251 | 101 | 0.528664 | false |
isotoma/precog | setup.py | 1 | 1182 | # Copyright 2016 Isotoma Limited
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
setup(
name='precog',
version='0.0.2',
author='Isotoma Limited',
author_email='[email protected]',
description='Git-hooks for flake8, isort and eslint',
url='https://github.com/isotoma/precog',
packages=find_packages(),
test_suite='tests',
install_requires=[
# These can probably be relaxed.
'isort>=4.2.2',
'flake8>=2.4.1',
],
tests_require=['mock'],
license="Apache Software License",
entry_points='''
[console_scripts]
precog = precog.install:install_git_hook
'''
)
| apache-2.0 | 5,478,780,143,963,121,000 | 30.105263 | 74 | 0.692047 | false |
IronLanguages/ironpython3 | Src/StdLib/Lib/socket.py | 1 | 20953 | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
IntEnum constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Integer constants:
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
from enum import IntEnum
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["fromfd", "getfqdn", "create_connection",
"AddressFamily", "SocketKind"]
__all__.extend(os._get_exports_list(_socket))
# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
# nicer string representations.
# Note that _socket only knows about the integer values. The public interface
# in this module understands the enums and translates them back from integers
# where needed (e.g. .family property of a socket object).
IntEnum._convert(
'AddressFamily',
__name__,
lambda C: C.isupper() and C.startswith('AF_'))
IntEnum._convert(
'SocketKind',
__name__,
lambda C: C.isupper() and C.startswith('SOCK_'))
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
def _intenum_converter(value, enum_klass):
"""Convert a numeric family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
try:
return enum_klass(value)
except ValueError:
return value
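# --- Illustrative sketch (added; not part of the stdlib module) ---
# Hedged example of _intenum_converter(): known numeric values map to enum
# members, unknown values are passed through unchanged. Not called at import time.
def _example_intenum_converter():
    fam = _intenum_converter(int(AF_INET), AddressFamily)   # -> AddressFamily.AF_INET
    raw = _intenum_converter(9999, AddressFamily)           # -> 9999 (no such member)
    return fam, raw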
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
class socket(_socket.socket):
"""A subclass of _socket.socket adding the makefile() method."""
__slots__ = ["__weakref__", "_io_refs", "_closed"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
# For user code address family and type values are IntEnum members, but
# for the underlying _socket.socket they're just integers. The
# constructor of _socket.socket converts the given argument to an
# integer automatically.
_socket.socket.__init__(self, family, type, proto, fileno)
self._io_refs = 0
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
if not self._closed:
self.close()
def __repr__(self):
"""Wrap __repr__() to reveal the real class name and socket
address(es).
"""
closed = getattr(self, '_closed', False)
s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
% (self.__class__.__module__,
self.__class__.__name__,
" [closed]" if closed else "",
self.fileno(),
self.family,
self.type,
self.proto)
if not closed:
try:
laddr = self.getsockname()
if laddr:
s += ", laddr=%s" % str(laddr)
except error:
pass
try:
raddr = self.getpeername()
if raddr:
s += ", raddr=%s" % str(raddr)
except error:
pass
s += '>'
return s
def __getstate__(self):
raise TypeError("Cannot serialize socket object")
def dup(self):
"""dup() -> socket object
Duplicate the socket. Return a new socket object connected to the same
system resource. The new socket is non-inheritable.
"""
fd = dup(self.fileno())
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
sock.settimeout(self.gettimeout())
return sock
def accept(self):
"""accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
fd, addr = self._accept()
# If our type has the SOCK_NONBLOCK flag, we shouldn't pass it onto the
# new socket. We do not currently allow passing SOCK_NONBLOCK to
# accept4, so the returned socket is always blocking.
type = self.type & ~globals().get("SOCK_NONBLOCK", 0)
sock = socket(self.family, type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
if getdefaulttimeout() is None and self.gettimeout():
sock.setblocking(True)
return sock, addr
def makefile(self, mode="r", buffering=None, *,
encoding=None, errors=None, newline=None):
"""makefile(...) -> an I/O stream connected to the socket
The arguments are as for io.open() after the filename,
except the only mode characters supported are 'r', 'w' and 'b'.
The semantics are similar too. (XXX refactor to share code?)
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
def _decref_socketios(self):
if self._io_refs > 0:
self._io_refs -= 1
if self._closed:
self.close()
def _real_close(self, _ss=_socket.socket):
# This function should not reference any globals. See issue #808164.
_ss.close(self)
def close(self):
# This function should not reference any globals. See issue #808164.
self._closed = True
if self._io_refs <= 0:
self._real_close()
def detach(self):
"""detach() -> file descriptor
Close the socket object without closing the underlying file descriptor.
The object cannot be used after this call, but the file descriptor
can be reused for other purposes. The file descriptor is returned.
"""
self._closed = True
return super().detach()
@property
def family(self):
"""Read-only access to the address family for this socket.
"""
return _intenum_converter(super().family, AddressFamily)
@property
def type(self):
"""Read-only access to the socket type.
"""
return _intenum_converter(super().type, SocketKind)
if os.name == 'nt':
def get_inheritable(self):
return os.get_handle_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_handle_inheritable(self.fileno(), inheritable)
else:
def get_inheritable(self):
return os.get_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_inheritable(self.fileno(), inheritable)
get_inheritable.__doc__ = "Get the inheritable flag of the socket"
set_inheritable.__doc__ = "Set the inheritable flag of the socket"
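# --- Illustrative sketch (added; not part of the stdlib module) ---
# A minimal, hedged example of the makefile() facility provided by the socket
# class above: a blocking echo server that serves a single connection through
# text-mode file objects. The host and port are arbitrary placeholders, and the
# function is never called at import time.
def _example_echo_server(host="127.0.0.1", port=50007):
    with socket(AF_INET, SOCK_STREAM) as srv:
        srv.bind((host, port))
        srv.listen(1)
        conn, addr = srv.accept()
        with conn, conn.makefile("r") as rf, conn.makefile("w") as wf:
            for line in rf:        # read lines until the client closes
                wf.write(line)     # echo each line back
                wf.flush()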
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
if hasattr(_socket.socket, "share"):
def fromshare(info):
""" fromshare(info) -> socket object
Create a socket object from the bytes object returned by
socket.share(pid).
"""
return socket(0, 0, 0, info)
__all__.append("fromshare")
if hasattr(_socket, "socketpair"):
def socketpair(family=None, type=SOCK_STREAM, proto=0):
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
if family is None:
try:
family = AF_UNIX
except NameError:
family = AF_INET
a, b = _socket.socketpair(family, type, proto)
a = socket(family, type, proto, a.detach())
b = socket(family, type, proto, b.detach())
return a, b
else:
# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
if family == AF_INET:
host = _LOCALHOST
elif family == AF_INET6:
host = _LOCALHOST_V6
else:
raise ValueError("Only AF_INET and AF_INET6 socket address families "
"are supported")
if type != SOCK_STREAM:
raise ValueError("Only SOCK_STREAM socket type is supported")
if proto != 0:
raise ValueError("Only protocol zero is supported")
# We create a connected TCP socket. Note the trick with
# setblocking(False) that prevents us from having to create a thread.
lsock = socket(family, type, proto)
try:
lsock.bind((host, 0))
lsock.listen()
# On IPv6, ignore flow_info and scope_id
addr, port = lsock.getsockname()[:2]
csock = socket(family, type, proto)
try:
csock.setblocking(False)
try:
csock.connect((addr, port))
except (BlockingIOError, InterruptedError):
pass
csock.setblocking(True)
ssock, _ = lsock.accept()
except:
csock.close()
raise
finally:
lsock.close()
return (ssock, csock)
__all__.append("socketpair")
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
# - it wouldn't work under Windows (where you can't used read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise OSError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except InterruptedError:
continue
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
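    For example (illustrative): create_connection(('www.example.com', 80), timeout=5.0)
    returns a connected socket, or raises the last error encountered if every
    resolved address fails.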
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
"""Resolve host and port into list of address info entries.
Translate the host/port argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
host is a domain name, a string representation of an IPv4/v6 address or
None. port is a string service name such as 'http', a numeric port number or
None. By passing None as the value of host and port, you can pass NULL to
the underlying C API.
The family, type and proto arguments can be optionally specified in order to
narrow the list of addresses returned. Passing zero as a value for each of
these arguments selects the full range of results.
"""
# We override this function since we want to translate the numeric family
# and socket type values to enum constants.
addrlist = []
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
af, socktype, proto, canonname, sa = res
addrlist.append((_intenum_converter(af, AddressFamily),
_intenum_converter(socktype, SocketKind),
proto, canonname, sa))
return addrlist
| apache-2.0 | 2,936,274,193,800,808,000 | 34.393581 | 95 | 0.606214 | false |
lehinevych/cfme_tests | cfme/tests/services/test_add_remove_vm_to_service.py | 1 | 3372 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme.common.provider import cleanup_vm
from cfme.services.catalogs.service_catalogs import ServiceCatalogs
from cfme.services import requests
from cfme.services.catalogs.myservice import MyService
from cfme.automate.simulation import simulate
from cfme.automate.explorer import Domain, Namespace, Class, Method
from utils import testgen
from utils.log import logger
from utils.wait import wait_for
pytestmark = [
pytest.mark.usefixtures("logged_in"),
pytest.mark.usefixtures("vm_name"),
pytest.mark.usefixtures("catalog_item"),
pytest.mark.usefixtures('uses_infra_providers'),
pytest.mark.long_running,
pytest.mark.ignore_stream("upstream"),
pytest.mark.meta(server_roles="+automate"),
pytest.mark.tier(3)
]
pytest_generate_tests = testgen.generate(testgen.provider_by_type, ['virtualcenter'],
scope="module")
@pytest.fixture(scope="function")
def copy_domain(request):
domain = Domain(name=fauxfactory.gen_alphanumeric(), enabled=True)
domain.create()
request.addfinalizer(lambda: domain.delete() if domain.exists() else None)
return domain
@pytest.fixture
def myservice(setup_provider, provider, catalog_item, request):
vm_name = catalog_item.provisioning_data["vm_name"]
request.addfinalizer(lambda: cleanup_vm(vm_name + "_0001", provider))
catalog_item.create()
service_catalogs = ServiceCatalogs("service_name")
service_catalogs.order(catalog_item.catalog, catalog_item)
logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
row_description = catalog_item.name
cells = {'Description': row_description}
row, __ = wait_for(requests.wait_for_request, [cells, True],
fail_func=requests.reload, num_sec=900, delay=20)
assert row.last_message.text == 'Request complete'
return MyService(catalog_item.name, vm_name)
def test_add_vm_to_service(myservice, request, copy_domain):
"""Tests adding vm to service
Metadata:
test_flag: provision
"""
method_torso = """
def add_to_service
vm = $evm.root['vm']
service = $evm.vmdb('service').find_by_name('{}')
user = $evm.root['user']
if service && vm
$evm.log('info', "XXXXXXXX Attaching Service to VM: [#{{service.name}}][#{{vm.name}}]")
vm.add_to_service(service)
vm.owner = user if user
vm.group = user.miq_group if user
end
end
$evm.log("info", "Listing Root Object Attributes:")
$evm.log("info", "===========================================")
add_to_service
""".format(myservice.service_name)
method = Method(
name="InspectMe",
data=method_torso,
cls=Class(
name="Request",
namespace=Namespace(
name="System",
parent=copy_domain
)
)
)
method.create()
request.addfinalizer(lambda: method.delete() if method.exists() else None)
simulate(
instance="Request",
message="create",
request=method.name,
attribute=["VM and Instance", "auto_test_services"], # Random selection, does not matter
execute_methods=True
)
myservice.check_vm_add("auto_test_services")
request.addfinalizer(lambda: myservice.delete(myservice.service_name))
| gpl-2.0 | -5,700,013,254,477,437,000 | 32.058824 | 97 | 0.659253 | false |
mastorak/udolc | udolc/UdolcWindow.py | 1 | 4139 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 <Konstantinos Mastorakis> <mastorak at gmail dot com>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import os,stat
import gettext
from gettext import gettext as _
gettext.textdomain('udolc')
from gi.repository import Gtk # pylint: disable=E0611
import logging
logger = logging.getLogger('udolc')
from udolc_lib import Window
from udolc.AboutUdolcDialog import AboutUdolcDialog
from udolc.InfoDialog import InfoDialog
from udolc.InvalidAttributesDialog import InvalidAttributesDialog
# See udolc_lib.Window.py for more details about how this class works
class UdolcWindow(Window):
__gtype_name__ = "UdolcWindow"
def finish_initializing(self, builder): # pylint: disable=E1002
"""Set up the main window"""
super(UdolcWindow, self).finish_initializing(builder)
self.AboutDialog = AboutUdolcDialog
statusIcon = Gtk.StatusIcon()
statusIcon.set_from_file('data/media/udolc.svg')
statusIcon.set_visible(True)
# Get widgets
self.saveBtn=self.builder.get_object("saveBtn")
self.resetBtn=self.builder.get_object("resetBtn")
self.nameField=self.builder.get_object("nameField")
self.commentField=self.builder.get_object("commentField")
self.execField=self.builder.get_object("execField")
self.iconSelect=self.builder.get_object("iconSelect")
self.terminalCheckbox=self.builder.get_object("terminalCheckbox")
self.typeCombo=self.builder.get_object("typeCombo")
#Initialise widgets
self.iconSelect.set_filename("/usr/share/udolc/media/default_icon.png")
self.typeCombo.set_active(0)
def on_saveBtn_clicked(self,widget):
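        """Collect the launcher attributes from the form and write a freedesktop .desktop file into ~/.local/share/applications."""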
        print "Saving launcher"
name=self.nameField.get_text()
comment=self.commentField.get_text()
if comment=="":
comment=name
executable=self.execField.get_text()
icon=self.iconSelect.get_filename()
launcherType=self.typeCombo.get_active_text()
terminalCheck=self.terminalCheckbox.get_active()
isTerminal="false"
if terminalCheck:
isTerminal="true"
if name=="" or executable=="":
print "Invalid Arguments"
error=InvalidAttributesDialog()
error.show()
return
else:
homeDir=os.getenv("HOME")
copyDir=homeDir+"/.local/share/applications/"
fileName=copyDir+name+".desktop"
f = open(fileName, 'w')
f.write("[Desktop Entry]\n")
f.write("Type=Application\n")
f.write("Name="+name+"\n")
f.write("Comment="+comment+"\n")
f.write("Exec="+executable+"\n")
f.write("Icon="+icon+"\n")
f.write("Terminal="+isTerminal+"\n")
f.write("Categories="+launcherType+";\n")
f.close()
os.chmod(fileName, stat.S_IRWXU)
info=InfoDialog()
os.system("xdg-open "+copyDir)
info.show()
def on_resetBtn_clicked(self,widget):
self.nameField.set_text("")
self.commentField.set_text("")
self.execField.set_text("")
self.iconSelect.set_filename("/usr/share/udolc/media/default_icon.png")
self.typeCombo.set_active(0)
| gpl-3.0 | -7,600,585,048,314,815,000 | 35.955357 | 79 | 0.62648 | false |
goibibo/woof | tests/test_partitioned_prod.py | 1 | 1412 | import logging
import os
import sys
from woof.partitioned_producer import CyclicPartitionedProducer, PartitionedProducer, dumb_hash
# import pdb; pdb.set_trace()
import time
if len(sys.argv) <= 3:
topic = "test.3part"
key = "JY"
msg = "Hello cruel world"
else:
topic = sys.argv[1]
key = sys.argv[2]
msg = sys.argv[3]
logging.basicConfig(
format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
filename='/tmp/kafkalog',
level=logging.DEBUG
)
logger = logging.getLogger('kafka')
logger.setLevel(logging.INFO)
server = os.getenv("GOMSG_SRV", "localhost:9092")
print server
print topic, key, msg
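# Rough benchmark: time connect and a single send() for the cyclic (async), keyed (default partitioner) and dumb_hash ("legacy") producers.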
t1 = time.time()
prod_cyclic = CyclicPartitionedProducer(server, async=True)
print "Cyclic Async Connect time ", time.time() - t1
t1 = time.time()
prod_cyclic.send(topic, key, " [%s] %s" % (str(t1), msg))
print "CyclicSend Async time ", time.time() - t1
t1 = time.time()
prod_keyed = PartitionedProducer(server)
print "Paritioned Connect time ", time.time() - t1
t1 = time.time()
prod_keyed.send(topic, key, " [%s] %s" % (str(t1), msg))
print "Paritioned time ", time.time() - t1
t1 = time.time()
prod_legacy = PartitionedProducer(server, partitioner=dumb_hash)
print "Legacy Prod Connect time ", time.time() - t1
t1 = time.time()
prod_legacy.send(topic, key, " [%s] %s" % (str(t1), msg))
print "Legacy Prod ", time.time() - t1 | apache-2.0 | 5,183,412,907,175,997,000 | 26.705882 | 97 | 0.672805 | false |
martin-green/skojjt | imports.py | 1 | 3816 | # -*- coding: utf-8 -*-
import time
from data import Semester, TaskProgress, UserPrefs
from dataimport import RunScoutnetImport
from google.appengine.ext import deferred, ndb
from flask import Blueprint, render_template, request, make_response, redirect
import_page = Blueprint('import_page', __name__, template_folder='templates')
@import_page.route('/', methods = ['POST', 'GET'])
def import_():
user = UserPrefs.current()
if not user.canImport():
return "denied", 403
breadcrumbs = [{'link':'/', 'text':'Hem'},
{'link':'/import', 'text':'Import'}]
currentSemester = Semester.getOrCreateCurrent()
semesters=[currentSemester]
semesters.extend(Semester.query(Semester.key!=currentSemester.key))
if request.method != 'POST':
return render_template('updatefromscoutnetform.html', heading="Import", breadcrumbs=breadcrumbs, user=user, semesters=semesters)
api_key = request.form.get('apikey').strip()
groupid = request.form.get('groupid').strip()
semester_key=ndb.Key(urlsafe=request.form.get('semester'))
return startAsyncImport(api_key, groupid, semester_key, user, request)
progress = Blueprint('progress_page', 'progress', template_folder='templates')
@progress.route('/<progress_url>')
@progress.route('/<progress_url>/')
@progress.route('/<progress_url>/<update>')
@progress.route('/<progress_url>/<update>/')
def importProgress(progress_url, update=None):
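    """Render the import progress page, or return the task progress as JSON when the update path segment is supplied."""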
if update is not None:
taskProgress = None
for i in range(1, 2):
taskProgress = ndb.Key(urlsafe=progress_url).get()
if taskProgress is not None:
break
time.sleep(1)
if taskProgress is not None:
s = taskProgress.toJson()
else:
s = '{"messages": ["Error: Hittar inte uppgiften"], "failed": "true", "running": "false"}'
response = make_response(s)
response.headers['Content-Type'] = 'application/json'
return response
breadcrumbs = [{'link':'/', 'text':'Hem'}, {'link':'/import', 'text':'Import'}]
return render_template('importresult.html', tabletitle="Importresultat", rowtitle='Result', breadcrumbs=breadcrumbs)
def startAsyncImport(api_key, groupid, semester_key, user, request):
"""
:type api_key: str
:type groupid: str
:type semester_key: google.appengine.ext.ndb.Key
:type user: data.UserPrefs
:type request: werkzeug.local.LocalProxy
:rtype werkzeug.wrappers.response.Response
"""
taskProgress = TaskProgress(name='Import', return_url=request.url)
taskProgress.put()
deferred.defer(importTask, api_key, groupid, semester_key, taskProgress.key, user.key)
return redirect('/progress/' + taskProgress.key.urlsafe())
def importTask(api_key, groupid, semester_key, taskProgress_key, user_key):
"""
:type api_key: str
:type groupid: str
:type semester_key: google.appengine.ext.ndb.Key
:type taskProgress_key: google.appengine.ext.ndb.Key
:type user_key: google.appengine.ext.ndb.Key
"""
semester = semester_key.get() # type: data.Semester
user = user_key.get() # type: data.UserPrefs
progress = None
for i in range(1, 3):
progress = taskProgress_key.get() # type: data.TaskProgress
if progress is not None:
break
time.sleep(1) # wait for the eventual consistency
try:
success = RunScoutnetImport(groupid, api_key, user, semester, progress)
if not success:
progress.info("Importen misslyckades")
progress.failed = True
else:
progress.info("Import klar")
except Exception as e: # catch all exceptions so that defer stops running it again (automatic retry)
progress.info("Importfel: " + str(e))
progress.done()
| apache-2.0 | 3,902,891,039,809,497,600 | 38.340206 | 136 | 0.659329 | false |
bsgbryan/Ardus | node_modules/microtime/build/c4che/default.cache.py | 1 | 1405 | AR = '/usr/bin/ar'
ARFLAGS = 'rcs'
CCFLAGS = ['-g']
CCFLAGS_MACBUNDLE = ['-fPIC']
CCFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CC_VERSION = ('4', '2', '1')
COMPILER_CXX = 'g++'
CPP = '/usr/bin/cpp'
CPPFLAGS_NODE = ['-D_GNU_SOURCE', '-DEV_MULTIPLICITY=0']
CPPPATH_NODE = '/usr/local/include/node'
CPPPATH_ST = '-I%s'
CXX = ['/usr/bin/g++']
CXXDEFINES_ST = '-D%s'
CXXFLAGS = ['-g']
CXXFLAGS_DEBUG = ['-g']
CXXFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CXXFLAGS_RELEASE = ['-O2']
CXXLNK_SRC_F = ''
CXXLNK_TGT_F = ['-o', '']
CXX_NAME = 'gcc'
CXX_SRC_F = ''
CXX_TGT_F = ['-c', '-o', '']
DEST_CPU = 'x86_64'
DEST_OS = 'darwin'
FULLSTATIC_MARKER = '-static'
LIBDIR = '/Users/maynardb/.node_libraries'
LIBPATH_NODE = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_ST = '-l%s'
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINK_CXX = ['/usr/bin/g++']
NODE_PATH = '/Users/maynardb/.node_libraries'
PREFIX = '/usr/local'
PREFIX_NODE = '/usr/local'
RANLIB = '/usr/bin/ranlib'
RPATH_ST = '-Wl,-rpath,%s'
SHLIB_MARKER = ''
SONAME_ST = ''
STATICLIBPATH_ST = '-L%s'
STATICLIB_MARKER = ''
STATICLIB_ST = '-l%s'
macbundle_PATTERN = '%s.bundle'
program_PATTERN = '%s'
shlib_CXXFLAGS = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
shlib_LINKFLAGS = ['-dynamiclib']
shlib_PATTERN = 'lib%s.dylib'
staticlib_LINKFLAGS = []
staticlib_PATTERN = 'lib%s.a'
| mit | -3,397,733,078,171,443,700 | 28.270833 | 82 | 0.625623 | false |
Elastica/kombu | kombu/clocks.py | 1 | 4635 | """
kombu.clocks
============
Logical Clocks and Synchronization.
"""
from __future__ import absolute_import, unicode_literals
from threading import Lock
from itertools import islice
from operator import itemgetter
from .five import python_2_unicode_compatible, zip
__all__ = ['LamportClock', 'timetuple']
R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})'
@python_2_unicode_compatible
class timetuple(tuple):
"""Tuple of event clock information.
Can be used as part of a heap to keep events ordered.
:param clock: Event clock value.
:param timestamp: Event UNIX timestamp value.
:param id: Event host id (e.g. ``hostname:pid``).
:param obj: Optional obj to associate with this event.
"""
__slots__ = ()
def __new__(cls, clock, timestamp, id, obj=None):
return tuple.__new__(cls, (clock, timestamp, id, obj))
def __repr__(self):
return R_CLOCK.format(*self)
def __getnewargs__(self):
return tuple(self)
def __lt__(self, other):
# 0: clock 1: timestamp 3: process id
try:
A, B = self[0], other[0]
# uses logical clock value first
if A and B: # use logical clock if available
if A == B: # equal clocks use lower process id
return self[2] < other[2]
return A < B
return self[1] < other[1] # ... or use timestamp
except IndexError:
return NotImplemented
def __gt__(self, other):
return other < self
def __le__(self, other):
return not other < self
def __ge__(self, other):
return not self < other
clock = property(itemgetter(0))
timestamp = property(itemgetter(1))
id = property(itemgetter(2))
obj = property(itemgetter(3))
@python_2_unicode_compatible
class LamportClock(object):
"""Lamport's logical clock.
From Wikipedia:
A Lamport logical clock is a monotonically incrementing software counter
maintained in each process. It follows some simple rules:
* A process increments its counter before each event in that process;
* When a process sends a message, it includes its counter value with
the message;
* On receiving a message, the receiver process sets its counter to be
greater than the maximum of its own value and the received value
before it considers the message received.
Conceptually, this logical clock can be thought of as a clock that only
has meaning in relation to messages moving between processes. When a
process receives a message, it resynchronizes its logical clock with
the sender.
.. seealso::
* `Lamport timestamps`_
        * `Lamport's distributed mutex`_
.. _`Lamport Timestamps`: http://en.wikipedia.org/wiki/Lamport_timestamps
    .. _`Lamport's distributed mutex`: http://bit.ly/p99ybE
*Usage*
When sending a message use :meth:`forward` to increment the clock,
when receiving a message use :meth:`adjust` to sync with
the time stamp of the incoming message.
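    A minimal illustration (names are examples only)::
        clock = LamportClock()
        stamp = clock.forward()        # increment before sending; attach stamp to the message
        clock.adjust(incoming_stamp)   # on receipt, resync with the sender's value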
"""
#: The clocks current value.
value = 0
def __init__(self, initial_value=0, Lock=Lock):
self.value = initial_value
self.mutex = Lock()
def adjust(self, other):
with self.mutex:
value = self.value = max(self.value, other) + 1
return value
def forward(self):
with self.mutex:
self.value += 1
return self.value
def sort_heap(self, h):
"""List of tuples containing at least two elements, representing
an event, where the first element is the event's scalar clock value,
and the second element is the id of the process (usually
``"hostname:pid"``): ``sh([(clock, processid, ...?), (...)])``
The list must already be sorted, which is why we refer to it as a
heap.
The tuple will not be unpacked, so more than two elements can be
present.
Will return the latest event.
"""
if h[0][0] == h[1][0]:
same = []
for PN in zip(h, islice(h, 1, None)):
if PN[0][0] != PN[1][0]:
break # Prev and Next's clocks differ
same.append(PN[0])
# return first item sorted by process id
return sorted(same, key=lambda event: event[1])[0]
# clock values unique, return first item
return h[0]
def __str__(self):
return str(self.value)
def __repr__(self):
return '<LamportClock: {0.value}>'.format(self)
| bsd-3-clause | 5,739,204,968,868,085,000 | 28.711538 | 77 | 0.604962 | false |
miquelcampos/GEAR_mc | gear/xsi/rig/component/chain_01/guide.py | 1 | 3991 | '''
This file is part of GEAR.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin [email protected]
Url: http://gear.jeremiepasserin.com
Date: 2010 / 11 / 15
'''
## @package gear.xsi.rig.component.chain_01.guide
# @author Jeremie Passerin
#
##########################################################
# GLOBAL
##########################################################
# gear
from gear.xsi import c
from gear.xsi.rig.component.guide import ComponentGuide
# guide info
AUTHOR = "Jeremie Passerin"
URL = "http://www.jeremiepasserin.com"
EMAIL = "[email protected]"
VERSION = [1,0,1]
TYPE = "chain_01"
NAME = "chain"
DESCRIPTION = "Simple ik/fk chain"
##########################################################
# CLASS
##########################################################
class Guide(ComponentGuide):
compType = TYPE
compName = NAME
description = DESCRIPTION
author = AUTHOR
url = URL
email = EMAIL
version = VERSION
compatible = ["tail_01", "chain_cns_01"]
# =====================================================
##
# @param self
def postInit(self):
self.pick_transform = ["root", "#_loc"]
self.save_transform = ["root", "#_loc"]
self.save_blade = ["blade"]
self.addMinMax("#_loc", 1, -1)
# =====================================================
## Add more object to the object definition list.
# @param self
def addObjects(self):
self.root = self.addRoot()
self.locs = self.addLocMulti("#_loc", self.root)
self.blade = self.addBlade("blade", self.root, self.locs[0])
centers = [self.root]
centers.extend(self.locs)
self.dispcrv = self.addDispCurve("crv", centers)
# =====================================================
## Add more parameter to the parameter definition list.
# @param self
def addParameters(self):
self.pType = self.addParam("type", c.siInt4, 0, 0, None)
self.pBlend = self.addParam("blend", c.siInt4, 0, 0, 1)
self.pNeutralPose = self.addParam("neutralpose", c.siBool, False)
# =====================================================
## Add layout for new parameters.
# @param self
def addLayout(self):
# --------------------------------------------------
# Items
typeItems = ["fk only", 0,
"ik only", 1,
"ik / fk", 2]
blendItems = ["fk", 0,
"ik", 1]
# --------------------------------------------------
# Layout
tab = self.layout.addTab("Options")
group = tab.addGroup("Kinematic")
group.addEnumControl(self.pType.scriptName, typeItems, "Type", c.siControlCombo)
item = group.addItem(self.pNeutralPose.scriptName, "Set Neutral Pose on FK Controlers")
item.addCondition("PPG."+self.pType.scriptName+".Value != 1")
item = group.addEnumControl(self.pBlend.scriptName, blendItems, "Default blend", c.siControlCombo)
item.addCondition("PPG."+self.pType.scriptName+".Value == 2")
# =====================================================
## Add logic for new layout.
# @param self
def addLogic(self):
self.logic.addOnChangedRefresh(self.pType.scriptName)
| lgpl-3.0 | 630,757,503,374,750,000 | 31.185484 | 106 | 0.527938 | false |
google/skia | infra/bots/assets/cmake_mac/create.py | 1 | 1130 | #!/usr/bin/env python
#
# Copyright 2019 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import os
import subprocess
import sys
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
INFRA_BOTS_DIR = os.path.realpath(os.path.join(FILE_DIR, os.pardir, os.pardir))
sys.path.insert(0, INFRA_BOTS_DIR)
import utils
VERSION = '3.13.4'
URL = ('https://github.com/Kitware/CMake/releases/download/v%s/'
'cmake-%s-Darwin-x86_64.tar.gz') % (VERSION, VERSION)
def create_asset(target_dir):
"""Create the asset."""
with utils.tmp_dir():
subprocess.check_call(['wget', URL, '--output-document=cmake.tar.gz'])
subprocess.check_call(['tar', '--extract', '--gunzip', '--file',
'cmake.tar.gz', '--directory', target_dir,
'--strip-components', '1'])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
| bsd-3-clause | -5,843,847,538,980,746,000 | 24.111111 | 79 | 0.635398 | false |
shengqh/ngsperl | lib/QC/bamSoftClipPosition.py | 1 | 3130 | import pysam
import argparse
import sys
import logging
import os
from asyncore import read
parser = argparse.ArgumentParser(description="Build soft clip position distribution in BAM file.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
DEBUG=False
NOT_DEBUG = not DEBUG
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input BAM file', required=NOT_DEBUG)
parser.add_argument('--min-mapq', action='store', nargs='?', type=int, default=10, help="Minimum mapping quality of read")
parser.add_argument('--binsize', action='store', nargs='?', type=int, default=1000, help="Bin size of position")
parser.add_argument('--min-depth', action='store', nargs='?', type=int, default=100, help="Minimum depth for output")
parser.add_argument('-o', '--output', action='store', nargs='?', help="Output soft clip distribution file name", required=NOT_DEBUG)
if NOT_DEBUG and len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if DEBUG:
args.input = "/scratch/cqs/shengq2/jennifer/20190906_lindsay_exomeseq_3772_hg38/softclip/P_175_06.indel.recal.TP53.bam"
args.output = "/scratch/cqs/shengq2/jennifer/20190906_lindsay_exomeseq_3772_hg38/softclip/P_175_06.softclip.position.tsv"
logger = logging.getLogger('bamSoftClipPosition')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
def filterReadQuality(read, min_mapq):
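  """Return True when a read should be skipped: unmapped, MAPQ below min_mapq, secondary, QC-fail, duplicate or supplementary."""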
return(read.is_unmapped or read.mapping_quality < min_mapq or read.is_secondary or read.is_qcfail or read.is_duplicate or read.is_supplementary)
def hasSoftClip(read):
return("S" in read.cigarstring)
chrPositionMap = {}
processed = 0
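# chrPositionMap[chrom][bin] holds [softclip_read_count, other_read_count]; positions are binned by args.binsize.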
logger.info("reading %s" % args.input)
with pysam.Samfile(args.input, "rb") as samfile:
for read in samfile.fetch(until_eof=True):
processed += 1
if processed % 1000000 == 0:
logger.info("processed %d" % processed)
#break
if filterReadQuality(read, args.min_mapq):
continue
if len(read.reference_name) > 5:
continue
if not read.reference_name in chrPositionMap:
chrPositionMap[read.reference_name] = {}
positionMap = chrPositionMap[read.reference_name]
position = int(read.reference_start / args.binsize)
if not position in positionMap:
positionMap[position] = [0, 0]
posvalues = positionMap[position]
if hasSoftClip(read):
posvalues[0] = posvalues[0] + 1
else:
posvalues[1] = posvalues[1] + 1
with open(args.output, "wt") as sw:
sw.write("Chr\tStartPosition\tSoftClipRead\tOtherRead\tSoftClipPerc\n")
for chr in chrPositionMap.keys():
positionMap = chrPositionMap[chr]
positions = sorted(positionMap.keys())
for pos in positions:
posvalues = positionMap[pos]
sread = posvalues[0]
oread = posvalues[1]
allread = sread + oread
if allread >= args.min_depth:
sw.write("%s\t%d\t%d\t%d\t%.2f\n" % (chr, pos * args.binsize, sread, oread, sread * 1.0 / allread) )
logger.info("done.") | apache-2.0 | 2,228,004,793,218,113,500 | 36.195122 | 146 | 0.675399 | false |
msg/g2ools | nord/convert/lfo.py | 1 | 10685 | #
# lfo.py - Lfo tab conversion objects
#
# Copyright (c) 2006,2007 Matt Gerassimoff
#
# This file is part of g2ools.
#
# g2ools is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# g2ools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from nord.utils import setv, getv, isnm1osc
from nord.nm1.colors import nm1conncolors
from nord.convert import Convert
from nord.convert.convert import handleoscmasterslv, handlekbt, doslvcables
from nord.convert.table import modtable
def handleslv(conv, ratemodin, ratemodparam):
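  """Wire rate modulation and slave handling for a converted NM1 LFO.
  If the NM1 Slv output is in use, an OscMaster is inserted as rate master
  (range dependent, via handleoscmasterslv); if the Rate input is in use, the
  modulation depth is mapped through modtable, adding a Mix2-1B adjuster for
  intermediate depths.  Returns (ratemodin, ratemodparam, slv, kbt).
  """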
nmm, g2m = conv.nmmodule, conv.g2module
nmmp, g2mp = nmm.params, g2m.params
slv, kbt = None, g2m.inputs.Rate
if len(nmm.outputs.Slv.cables):
oscmaster = conv.add_module('OscMaster')
setv(g2mp.Rate, 64)
setv(oscmaster.params.Kbt, 0) # Off
setv(oscmaster.params.FreqCoarse, getv(nmmp.Rate))
#setv(oscmaster.params.PitchMod, modtable[getv(nmmp.RateMod)][0])
conv.connect(oscmaster.outputs.Out, g2m.inputs.Rate)
ratemodin = oscmaster.inputs.PitchVar
ratemodparam = oscmaster.params.PitchMod
slv = g2m.inputs.Rate
kbt = oscmaster.inputs.Pitch
conv.kbt = oscmaster.params.Kbt
if getv(nmmp.Range) == 1: # Lo
slv = handleoscmasterslv(conv, oscmaster, 64, 40, 50, 103, 41, True)
else:
slv = handleoscmasterslv(conv, oscmaster, 76, 64, 52, 104, 35, False)
# add fine tuning
if len(nmm.inputs.Rate.cables):
mod = getv(nmmp.RateMod)
if mod == 0 or mod == 127:
setv(ratemodparam, mod)
else:
setv(ratemodparam, modtable[mod][0])
adj = conv.add_module('Mix2-1B', name='PitchAdj')
conv.connect(adj.outputs.Out, ratemodin)
conv.connect(adj.inputs.Chain, adj.inputs.In1)
conv.connect(adj.inputs.In1, adj.inputs.In2)
setv(adj.params.Inv1, 1)
setv(adj.params.Lev1, modtable[mod][1])
setv(adj.params.Lev2, modtable[mod][2])
ratemodin = adj.inputs.Chain
return ratemodin, ratemodparam, slv, kbt
def postmst(conv, mstindex):
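  """Propagate settings from the module driving the Mst input after conversion.
  Copies PolyMono/Kbt where available and chooses the Range; if the master is
  not a slave-rate signal, an OscC/ZeroCnt pair is inserted to derive the rate.
  """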
nmm, g2m = conv.nmmodule, conv.g2module
nmmp, g2mp = nmm.params, g2m.params
mstin = nmm.inputs.Mst
if not len(mstin.cables):
return
if not mstin.net.output:
return
mstconv = mstin.net.output.module.conv
mst = mstconv.g2module
if hasattr(mst.params, 'PolyMono'):
setv(g2mp.PolyMono, getv(mst.params.PolyMono))
if hasattr(mst.params, 'Kbt') and hasattr(g2mp, 'Kbt'):
setv(g2mp.Kbt, getv(mst.params.Kbt))
if mstin.net.output.rate != nm1conncolors.slave:
oscc = conv.add_module('OscC', name='')
setv(oscc.params.FreqCoarse, 0)
setv(oscc.params.FmAmount, 79)
setv(oscc.params.Kbt, 0)
pout = conv.add_module('ZeroCnt', name='')
conv.connect(oscc.outputs.Out, pout.inputs.In)
conv.connect(pout.outputs.Out, g2m.inputs.Rate)
setv(g2mp.Range, 2)
conv.inputs[mstindex] = oscc.inputs.FmMod
return
if isnm1osc(mst):
setv(g2mp.Range, 2)
elif hasattr(mst.params, 'Range'):
setv(g2mp.Range, getv(mst.params.Range))
else:
setv(g2mp.Range, 1)
class ConvLFOA(Convert):
maing2module = 'LfoB'
parammap = ['Rate', 'Range', 'Waveform', 'RateMod', ['PolyMono', 'Mono'],
None, 'Phase', ['Active', 'Mute']]
inputmap = ['Rate', 'Rst']
outputmap = ['', 'Out'] # Slv
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
# handle special parameters
waveform = getv(nmmp.Waveform)
setv(g2mp.Waveform, [0, 1, 2, 2, 3][waveform])
if waveform != 3:
setv(g2mp.OutputType, 5) # BipInv
else:
# 180 phase
setv(g2mp.Phase, (range(64, 128)+range(64))[getv(nmmp.Phase)])
setv(g2mp.Active, 1-getv(nmmp.Mute))
self.kbt = g2m.params.Kbt
# update Rate input, Slv Output
ratemodin, rateparam, slv, kbt = handleslv(self,
g2m.inputs.RateVar, g2mp.RateMod)
self.inputs[0], self.outputs[0], kbt = ratemodin, slv, kbt
self.kbtout = handlekbt(self, kbt, 4, False)
def precables(self):
doslvcables(self)
class ConvLFOB(Convert):
maing2module = 'LfoShpA'
parammap = ['Rate', 'Range', 'Phase', 'RateMod', ['PolyMono', 'Mono'],
None, ['PhaseMod', 'PwMod'], ['Shape', 'Pw']]
inputmap = ['Rate', 'Rst', 'ShapeMod']
outputmap = ['Out', ''] # Slv
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Waveform, 5)
setv(g2mp.OutputType, 5) # BipInv
setv(g2mp.PhaseMod, getv(nmmp.PwMod))
self.kbt = g2m.params.Kbt
ratemodin, rateparam, slv, kbt = handleslv(self,
g2m.inputs.RateVar, g2mp.RateMod)
self.inputs[0], self.outputs[1], kbt = ratemodin, slv, kbt
self.kbtout = handlekbt(self, kbt, 4, False)
def precables(self):
doslvcables(self)
class ConvLFOC(Convert):
maing2module = 'LfoA'
parammap = ['Rate', 'Range', 'Waveform', 'RateMod', ['PolyMono', 'Mono'],
['Active', 'Mute']]
inputmap = ['RateVar']
outputmap = ['Out', '']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
waveform = getv(nmmp.Waveform)
setv(g2mp.Waveform, [0, 1, 2, 2, 3][waveform])
if waveform != 3:
setv(g2mp.OutputType, 5) # BipInv
setv(g2mp.Active, 1-getv(nmmp.Mute))
self.kbt = g2m.params.Kbt
ratemodin, rateparam, slv, kbt = handleslv(self,
g2m.inputs.RateVar, g2mp.RateMod)
self.inputs[0], self.outputs[1], kbt = ratemodin, slv, kbt
def precables(self):
doslvcables(self)
class ConvLFOSlvA(Convert):
maing2module = 'LfoB'
parammap = ['Rate', 'Phase', 'Waveform', ['PolyMono', 'Mono'],
['Active', 'Mute']]
inputmap = ['Rate', 'Rst']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
# handle special parameters
waveform = getv(nmmp.Waveform)
setv(g2mp.Waveform, [0, 1, 2, 2, 3][waveform])
if waveform != 3:
setv(g2mp.OutputType, 5) # BipInv
else:
# 180 phase
setv(g2mp.Phase, (range(64, 128)+range(64))[getv(nmmp.Phase)])
setv(g2mp.Active, 1-getv(nmmp.Mute))
postmst(self, 0)
class ConvLFOSlvB(Convert):
maing2module = 'LfoC'
waveform = 2
parammap = ['Rate']
inputmap = ['Rate']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
# handle special parameters
g2m.modes.Waveform.value = self.waveform
if self.waveform != 2:
setv(g2mp.OutputType, 4) # Bip
else:
setv(g2mp.OutputType, 5) # BipInv
postmst(self, 0)
class ConvLFOSlvC(ConvLFOSlvB):
waveform = 0
#3phase thinks we may need this. I'm leaving it as a comment for now.
#def domodule(self):
# ConvLFOSlvB.domodule(self)
# setv(self.g2module.params.OutputType, 5) # BipInv
class ConvLFOSlvD(ConvLFOSlvB):
waveform = 3
class ConvLFOSlvE(ConvLFOSlvC):
waveform = 1
class ConvClkGen(Convert):
maing2module = 'ClkGen'
parammap = ['Rate', ['Active', 'On/Off']]
inputmap = ['Rst']
outputmap = ['1/96', '1/16', '', 'Sync']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Active, getv(getattr(nmmp, 'On/Off')))
setv(g2mp.Source, 0) # Internal
if len(nmm.outputs.Sync.cables) != 0:
pulse = self.add_module('Pulse')
setv(pulse.params.Time, 32)
self.connect(g2m.outputs.ClkActive, pulse.inputs.In)
self.outputs[3] = pulse.outputs.Out
#handle Slv connections
if len(nmm.outputs.Slv.cables):
zerocnt = self.add_module('ZeroCnt', name='96th In')
oscmaster = self.add_module('OscMaster', name='26-241 BPM')
setv(oscmaster.params.FreqCoarse, 9) # -55 semi
setv(oscmaster.params.Kbt, 0) # off
self.connect(getattr(g2m.outputs, '1/96'), zerocnt.inputs.In)
self.connect(zerocnt.outputs.Out, oscmaster.inputs.Pitch)
self.outputs[2] = oscmaster.outputs.Out
class ConvClkRndGen(Convert):
maing2module = 'RndClkA'
parammap = [['PolyMono', 'Mono'], ['StepProb', 'Color']]
inputmap = ['Clk']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
if getv(nmmp.Color) == 1:
setv(g2mp.StepProb, 43)
else:
setv(g2mp.StepProb, 127)
class ConvRndStepGen(ConvLFOSlvB):
waveform = 4
class ConvRandomGen(ConvLFOSlvB):
waveform = 5
class ConvRndPulseGen(Convert):
maing2module = 'RndTrig'
parammap = [['StepProb', 'Density']]
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.StepProb, 96)
lfoc = self.add_module('LfoC', name='Clk')
self.connect(lfoc.outputs.Out, g2m.inputs.Clk)
setv(lfoc.params.Rate, getv(nmmp.Density))
self.params[0] = lfoc.params.Rate
class ConvPatternGen(Convert):
maing2module = 'RndPattern'
parammap = [['PatternA', 'Pattern'], ['PatternB', 'Bank'],
['StepProb', 'LowDelta'], ['LoopCount', 'Step'],
None]
inputmap = ['Clk', 'Rst', 'A']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
pattern = (getv(nmmp.Pattern) + 64) % 128
setv(g2mp.PatternA, pattern)
bank = (getv(nmmp.Bank) + 64) % 128
setv(g2mp.PatternB, bank)
# PatternA and PatternB receive same input
if len(getattr(nmm.inputs, 'Pattern&Bank').cables):
self.connect(g2m.inputs.A, g2m.inputs.B)
lowdelta = getv(nmmp.LowDelta)
if lowdelta:
notequant = self.add_module('NoteQuant')
self.connect(g2m.outputs.Out, notequant.inputs.In)
setv(notequant.params.Range, 77)
setv(notequant.params.Notes, 1)
self.outputs[0] = notequant.outputs.Out
stepprob, add = 55, 75
setv(g2mp.StepProb, 55)
else:
stepprob, add = 127, 74
setv(g2mp.StepProb, stepprob)
levadd = self.add_module('LevAdd')
self.connect(self.outputs[0], levadd.inputs.In)
setv(levadd.params.Level, add)
self.outputs[0] = levadd.outputs.Out
| gpl-2.0 | -1,390,390,369,953,476,000 | 29.971014 | 76 | 0.656341 | false |
lukeiwanski/tensorflow-opencl | tensorflow/python/kernel_tests/control_flow_ops_py_test.py | 1 | 96552 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
# pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_op_order(graph):
"""Sanity check on the ordering of op id."""
for op in graph.get_operations():
for v in op.inputs:
assert v.op._id < op._id or op.type == "Merge", (
"The id of %s must be less than the id of %s" % (v.op.name, op.name))
return True
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def all_fetchables():
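  """Returns the names of all fetchable tensors in the default graph."""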
tensor_names = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.outputs:
if graph.is_fetchable(t):
tensor_names.append(t.name)
return tensor_names
def all_feedables():
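  """Returns all feedable tensors that are consumed by some op in the default graph."""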
feedable_tensors = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_feedable(t):
feedable_tensors.append(t)
return feedable_tensors
def opt_cfg():
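  """Returns a ConfigProto with soft placement and L1 graph optimizations (function inlining, constant folding) enabled."""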
return config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True,
do_constant_folding=True)))
def isum(s):
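  """Builds a while_loop that adds 0 + 1 + ... + 9 (i.e. 45) to s and returns the result."""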
i = constant_op.constant(0, name="i")
c = lambda i, s: math_ops.less(i, 10)
b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
_, r_s = control_flow_ops.while_loop(c, b, [i, s])
return r_s
class ControlFlowTest(test.TestCase):
def testRefIdentity(self):
with self.test_session():
v = variables.Variable(7)
v = control_flow_ops._Identity(v)
op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(check_op_order(v.graph))
self.assertTrue(isinstance(v2, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.test_session():
v = variables.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
variables.global_variables_initializer().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.test_session():
v = variables.Variable(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeIndexedSlices(self):
with self.test_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def testSwitchDeadBranch(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "The tensor returned for" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.test_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = control_flow_ops.enter(false, "foo_1", False)
enter_n = control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.test_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testLoop_2(self):
with self.test_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testDifferentFrame(self):
with self.test_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = control_flow_ops.enter(data, "foo_1", False)
enter_2 = control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
def testCondBool(self):
values = constant_op.constant(10)
fn1 = lambda: math_ops.add(values, 1)
fn2 = lambda: math_ops.subtract(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = control_flow_ops.cond(False, fn1, fn2)
def testFetchables(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
tensor_names = all_fetchables()
for name in tensor_names:
sess.run(name, feed_dict={x: 3})
def testFeedable(self):
with self.test_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
def testCondIndexedSlices(self):
with self.test_session():
values = constant_op.constant(10)
indices = constant_op.constant(0)
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondSparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values.eval())
self.assertAllEqual([[1], [4]], r.indices.eval())
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondResource(self):
with self.test_session():
rv = resource_variable_ops.ResourceVariable(True)
variables.global_variables_initializer().run()
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(1.0, control_flow_ops.cond(rv, case, lambda: t).eval())
def testCondIndexedSlicesDifferentTypes(self):
with self.test_session():
values = constant_op.constant(10)
i_32 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def testCondColocation(self):
with self.test_session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.test_session():
x = constant_op.constant(10)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(9, result)
def testCond_3(self):
with self.test_session():
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(12, result)
def testCond_4(self):
with self.test_session():
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertTrue(check_op_order(age.graph))
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.test_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
def testCond_6(self):
with self.test_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.test_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondRef(self):
with self.test_session():
x = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="x",
container="",
shared_name="")
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], r.eval())
def testCondWithControl(self):
with self.test_session() as sess:
control_holder = array_ops.placeholder(dtypes.float32, shape=())
a = constant_op.constant(3)
def true_branch():
with ops.control_dependencies([control_holder]):
_ = a + 1
return a + 2
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
self.assertEqual(5, r.eval())
def testUninitializedRefIdentity(self):
with self.test_session() as sess:
v = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="v",
container="",
shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
# of the reference in the 'true' branch in the 'tf.identity' op will
# not 'fire' when v is uninitialized, so this is a valid construction.
# This test tests that _ref_identity allows uninitialized ref as input
# so that this construction is allowed.
v_f_op = gen_array_ops._ref_identity(v_f)
v_t_op = gen_array_ops._ref_identity(v_t)
with ops.control_dependencies([v_f_op]):
assign_v = state_ops.assign(v, [1.0])
with ops.control_dependencies([v_t_op]):
orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], sess.run(merged_op.output))
def testCondSwitchIdentity(self):
# Make sure the recv identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondRecvIdentity(self):
# Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
with ops.device("/cpu:0"):
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondGrad_1(self):
with self.test_session():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
result = grad.eval()
self.assertAllEqual(1.0, result)
def testCondGrad_2(self):
with self.test_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testNestedCond_Simple(self):
with self.test_session():
x = constant_op.constant(0., name="X")
y = control_flow_ops.cond(
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
def testCondGrad_Gather(self):
with self.test_session() as sess:
v1 = variables.Variable([1.0, 42.0])
c = array_ops.placeholder(dtypes.int32, shape=[])
pred = math_ops.less(c, 2)
fn1 = lambda: array_ops.identity(v1)
fn2 = lambda: array_ops.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [v1])[0]
variables.global_variables_initializer().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
with self.test_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithRefs_1(self):
with self.test_session() as sess:
x = variables.Variable(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return (i + 1, gen_array_ops._ref_identity(x))
r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = sess.run(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.test_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
def testWhile_3(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
d = ops.convert_to_tensor(100)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(42, result)
def testWhile_5(self):
with self.test_session():
def compute(i, c, o):
c = array_ops.strided_slice(x,
array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
i = ops.convert_to_tensor(0)
c = ops.convert_to_tensor([0])
o = ops.convert_to_tensor([0])
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(
lambda i, c, o: math_ops.less(i, s), compute, [i, c, o], [
i.get_shape(), tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()
])
result = r[2].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def testBufferForwarding(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.test_session() as sess:
with ops.device("/cpu:0"):
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
self.assertEqual(1000, r_val)
self.assertTrue(run_metadata.HasField("step_stats"))
unique_allocs = set()
for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
for output in node_stat.output:
unique_allocs.add(
output.tensor_description.allocation_description.ptr)
# Prior to cl/147536680, the number of unique allocations was about 1005.
self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
  def testWhile_Gpu_2(self):
    self._testWhile_Gpu_2(use_gpu=False)
    self._testWhile_Gpu_2(use_gpu=True)
def testWhileShape(self):
with self.test_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def _b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, _b, [i, m], [i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
def testWhileWithNonTensorInput_Scalar(self):
with self.test_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithNonTensorInput_Vector(self):
with self.test_session():
n = np.array([0]) # Note, [0] would not work here; that is a list
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], r.eval())
def testWhileShapeInference(self):
with self.test_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, b, [i, m], [i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertTrue(r[1].get_shape()[0].value is None)
self.assertEqual(r[1].get_shape()[1], tensor_shape.Dimension(2))
with self.assertRaisesRegexp(ValueError, "not an invariant for"):
r = control_flow_ops.while_loop(c, b, [i, m])
def testWhileShapeInferenceSparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1, sparse_tensor.SparseTensor(x.indices, x.values * 2.0,
x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 1)
_, r = control_flow_ops.while_loop(
c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([None])])
self.assertTrue(r.dense_shape.get_shape()[0].value is None)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([5])])
def testWhileShapeInferenceIndexedSlices(self):
with self.test_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10, 2], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1, ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
_, r = control_flow_ops.while_loop(
c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertTrue(r.values.get_shape()[0].value is None)
self.assertEqual(r.values.get_shape()[1].value, 2)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([None, 5])])
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(0)
def cpu_sum(s):
c = lambda i, s: math_ops.less(i, 10)
def b(i, s):
i1 = math_ops.add(i, 1)
with ops.device("/cpu:0"):
s1 = math_ops.add(i, s)
return i1, s1
_, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
self.assertEqual(225, r.eval())
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def _testNestedWhile_2(self, use_gpu):
    # Test the cases where the edges A -> Enter and Exit -> A are partitioned.
with self.test_session(use_gpu=use_gpu):
s0 = constant_op.constant(2.0)
def inner_loop(s):
c = lambda s: math_ops.less(s, 20.0)
def b(s):
s1 = math_ops.add(s, s)
return s1
r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
outer_c = lambda x: math_ops.less(x, 3000.0)
def outer_b(x):
x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
with ops.device("/cpu:0"):
x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
self.assertEqual(1048576.0, r.eval())
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
def testWhileWithControl_1(self):
with self.test_session():
n = constant_op.constant(0)
r = constant_op.constant(0)
condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [n_, r_]
res = control_flow_ops.while_loop(
condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
with self.test_session():
r = constant_op.constant(0)
condition = lambda r_: math_ops.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [r_]
res = control_flow_ops.while_loop(
condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
def testWhileWithControl_3(self):
with self.test_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_4(self):
with self.test_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(
lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_5(self):
with self.test_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
def body(x):
with ops.control_dependencies([b]):
return x + c
r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
    # Ensure that no control edges from an outer control dependency context are
    # added to nodes inside cond/while contexts.
with self.test_session() as sess:
const_true = lambda: constant_op.constant(True)
const_false = lambda: constant_op.constant(False)
cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
with ops.control_dependencies([control_flow_ops.no_op()]):
loop = control_flow_ops.while_loop(cond, body,
(constant_op.constant(5),))
self.assertEqual(0, sess.run(loop))
def testWhileCondExitControl(self):
with self.test_session():
v = variables.Variable(1)
def false_branch():
cond = lambda i: i < 100
def body(i):
x = state_ops.assign(v, i)
return x + 1
loop = control_flow_ops.while_loop(cond, body, [0])
        # Make sure the control edge from Exit to a node is handled correctly.
with ops.control_dependencies([loop]):
return constant_op.constant(6.0)
r = control_flow_ops.cond(
constant_op.constant(False), lambda: constant_op.constant(1.0),
false_branch)
variables.global_variables_initializer().run()
self.assertEqual(6.0, r.eval())
self.assertEqual(99, v.eval())
def testCondWhile_1(self):
with self.test_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
with self.test_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, r.eval())
def _testCondWhile_3(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
p = array_ops.placeholder(dtypes.bool)
n = constant_op.constant(0.0)
def c(x):
return math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
x1 = math_ops.add(x, 1.0)
return x1
r = control_flow_ops.cond(p,
lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: math_ops.multiply(n, 2.0))
r1 = gradients_impl.gradients(r, [n])
self.assertEqual(10, sess.run(r, {p: True}))
self.assertEqual([1.0], sess.run(r1, {p: True}))
self.assertEqual(0.0, sess.run(r, {p: False}))
self.assertEqual([2.0], sess.run(r1, {p: False}))
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
def testWhileCond_1(self):
with self.test_session():
i = ops.convert_to_tensor(0, name="i")
n = ops.convert_to_tensor(10, name="n")
one = ops.convert_to_tensor(1, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
with self.test_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
      b = lambda x: control_flow_ops.cond(
          constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
with self.test_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
with self.test_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
with self.test_session():
select1 = variables.Variable([3.0, 4.0, 5.0])
select2 = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns1 = state_ops.scatter_update(select1, j, 10.0)
ns2 = state_ops.scatter_update(select2, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
def testWhileUpdateVariable_3(self):
with self.test_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j, _):
return math_ops.less(j, 3)
def loop_body(j, _):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
return [nj, ns]
r = control_flow_ops.while_loop(
loop_iterator,
loop_body, [n, array_ops.identity(select)],
parallel_iterations=1)
variables.global_variables_initializer().run()
result = r[1].eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
# b/24814703
def testWhileUpdateVariable_4(self):
with self.test_session():
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
c = constant_op.constant(0, name="c")
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_b.eval())
# b/24736492
def testWhileUpdateVariable_5(self):
with self.test_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
# Change condition to check var_b
def pred(_):
return math_ops.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = state_ops.assign_add(
var_a, constant_op.constant(1), name="a_add")
asn2 = state_ops.assign_add(
var_b, constant_op.constant(1), name="b_add")
with ops.control_dependencies([asn1, asn2]):
inc_b = array_ops.identity(var_b)
return inc_b
lpa = control_flow_ops.while_loop(
pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
# b/24814668
def testWhileUpdateVariable_6(self):
with self.test_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
c = constant_op.constant(0)
variables.global_variables_initializer().run()
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
with ops.control_dependencies([asn1]):
asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(55, var_b.eval())
self.assertEqual(10, var_a.eval())
def testWhileQueue_1(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
def testWhileStack_1(self):
with self.test_session():
s = gen_data_flow_ops._stack(dtypes.int32, stack_name="foo")
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops._stack_push(s, i)], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
x = constant_op.constant(0)
def c1(i, _):
return math_ops.greater(i, 0)
def b1(i, x):
ni = math_ops.subtract(i, 1)
nx = x + gen_data_flow_ops._stack_pop(s, dtypes.int32)
return [ni, nx]
_, rx = control_flow_ops.while_loop(
c1,
b1, [r, x], [r.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
self.assertEqual(45, rx.eval())
def _testWhileGrad_ColocateGradients(self, colocate):
    gpu_dev_name = (test.gpu_device_name()
                    if test.is_gpu_available() else "/gpu:0")
    gpu_short_name = gpu_dev_name.split("/")[-1].lower()
with self.test_session(graph=ops.Graph()) as sess:
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(
loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = r.graph.get_operations()
r_devices = [(op.name, op.device.lower()) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
          # Only the forward graph contains gpu in the Square device
self.assertTrue(gpu_short_name in dev)
elif colocate and "Square" in name:
# Forward and backward graphs contain gpu in Square/Square_grad devices
self.assertTrue(gpu_short_name in dev)
else:
self.assertFalse(gpu_short_name in dev)
self.assertAllClose(1024.0, sess.run(r))
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
def testWhileGrad_Square(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
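      # The loop squares v three times (2 -> 4 -> 16 -> 256) and the cond takes
      # the true branch, so r = v**8 and dr/dv = 8 * v**7 = 1024 at v = 2.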
self.assertAllClose(1024.0, r.eval())
def testWhileGrad_Shape(self):
with self.test_session():
x = array_ops.placeholder(dtypes.float32, shape=[None])
v = constant_op.constant([2.0], name="v")
n = constant_op.constant(0, name="n")
c = lambda i, v: math_ops.less(i, 5)
b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
r = control_flow_ops.while_loop(
c,
b, [n, v], [n.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
r = gradients_impl.gradients(r[1], x)[0]
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_BaseShape(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32, [None])
v0 = constant_op.constant([2.0, 2.0], name="v")
c = lambda v: constant_op.constant(False)
b = lambda v: math_ops.multiply(v, x)
r = control_flow_ops.while_loop(c, b, [v0])
y = math_ops.square(x)
r = gradients_impl.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
def testWhileGrad_MultipleUses(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.multiply(r, r)
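      # The loop yields v**8 = 256, so r = v**16 and dr/dv = 16 * v**15 = 524288.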
r = gradients_impl.gradients(r, v)[0]
self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.test_session(use_gpu=use_gpu) as sess:
a = constant_op.constant(3.0, name="a")
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
grad_a, grad_v = gradients_impl.gradients(r, [a, v])
grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
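      # The loop runs four times (2 -> 6 -> 18 -> 54 -> 162), so r = v * a**4,
      # giving dr/da = 4 * v * a**3 = 216 and dr/dv = a**4 = 81.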
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def _testNestedWhileCondWhileGrad(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
def b(x):
return control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.square(inner_loop(x)[1]),
lambda: math_ops.multiply(x, 2.0))
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
def testNestedWhileCondWhileGrad(self):
self._testNestedWhileCondWhileGrad(use_gpu=False)
self._testNestedWhileCondWhileGrad(use_gpu=True)
def testWhileGrad_Variable(self):
with self.test_session():
a = variables.Variable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(r, a)
variables.global_variables_initializer().run()
self.assertAllClose(216.0, r[0].eval())
def testWhileGradInCond(self):
with self.test_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def fn1():
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
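      # With x = 1.0 the loop adds x nine times (1 -> 10), so dr/dx = 9.0.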
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testWhileGradInWhile(self):
with self.test_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def b1(n):
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
[tensor_shape.unknown_shape()])
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testWhile_NestedInput(self):
with self.test_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0),
constant_op.constant(3.0)), constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
r = control_flow_ops.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
self.assertTrue(isinstance(r[2], ops.Tensor))
r_flattened = nest.flatten(r)
self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
sess.run(r_flattened))
def testWhile_NestedBadArityFails(self):
with self.test_session():
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0),
constant_op.constant(3.0)), constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegexp(ValueError, "the same number of elements"):
control_flow_ops.while_loop(c, b, loop_vars)
def testWhileGrad_ys_xs(self):
with self.test_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.add(x, y)
x1 = math_ops.multiply(x, y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
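      # Two iterations run, so rx = x * (x + 1) * (x + y)**2 and
      # ry = (x + 1) * (x + y); the gradient values below follow from these forms.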
r = gradients_impl.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0].eval())
r = gradients_impl.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(295.0, r[0].eval())
r = gradients_impl.gradients([rx], y)
self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 10)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
r = gradients_impl.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_NoGradient(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
r = math_ops.add(r, v)
r = gradients_impl.gradients(r, v)
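      # back_prop=False stops gradients from flowing through the loop, so only
      # the direct dependence of add(r, v) on v contributes, giving 1.0.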
self.assertAllClose(1.0, r[0].eval())
def testWhileGrad_NoDependency(self):
with self.test_session() as sess:
variable = variables.Variable(array_ops.ones([2, 3]))
time = array_ops.zeros([], dtype=dtypes.int32)
def cond(time, tensor, _):
return time < 10
def body(time, tensor, _):
return (time + 1, tensor, tensor)
loop_vars = [time, variable, variable]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[2])
grad = gradients_impl.gradients(cost, [variable])
variables.global_variables_initializer().run()
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
def testWhileGrad_Const(self):
with self.test_session() as sess:
c0 = constant_op.constant(0.0, name="c0")
c1 = constant_op.constant(1.0, name="c1")
time = constant_op.constant(0, name="t")
def cond(time, _):
return time < 1
def body(time, tensor):
return time + 1, c1
loop_vars = [time, c0]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[1])
grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
def testWhileGrad_SerialTwoLoops(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
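      # Each loop doubles x five times, so rx = x * 2**10 and drx/dx = 1024.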
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
rx = math_ops.add(r1, r2)
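      # r1 = r2 = x * 2**5, so rx = 2 * 32 * x and drx/dx = 64.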
r = gradients_impl.gradients([rx], x)
self.assertAllClose(64.0, r[0].eval())
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(1.0, name="y")
c = lambda i, *_: math_ops.less(i, 1, name="cond_less")
def b(i, xi, yi):
# return (i + 1, xi, xi + yi)
return (math_ops.add(i, 1, name="inc"), array_ops.identity(
xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))
_, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
with ops.control_dependencies([x_f]):
y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, y_f_d.eval()) # y_f_d = 1.0 + 1.0
g = gradients_impl.gradients([y_f_d], [x])[0]
self.assertTrue(g is not None)
self.assertAllClose(1.0, g.eval()) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
c = lambda x: math_ops.less(x, 4.0)
b = lambda x: math_ops.multiply(x, 2.0)
return control_flow_ops.while_loop(c, b, [s])
c = lambda x: math_ops.less(x, 2.0)
b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
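      # One outer iteration runs: the inner loop doubles v twice (to 4 * v) and
      # the body doubles once more, so r = 8 * v and dr/dv = 8.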
self.assertAllClose(8.0, r.eval())
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
def testNestedWhileGrad_SerialInner(self):
with self.test_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(256.0, r.eval())
def testNestedWhileGrad_ParallelInner(self):
with self.test_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop.
with self.test_session() as sess:
def inner_loop(t):
fn = lambda n: n + math_ops.square(var)
return functional_ops.map_fn(fn=fn, elems=t, parallel_iterations=10)
def outer_loop(inp):
return functional_ops.map_fn(
fn=inner_loop, elems=inp, parallel_iterations=10)
var = variables.Variable(constant_op.constant(3.0))
inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
res = outer_loop(inp)
optimizer = adam.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
sess.run(variables.global_variables_initializer())
sess.run(train_op)
self.assertAllClose(2.999, var.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = ops.convert_to_tensor(2.0, name="v")
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
def testWhileCondGrad_Simple(self):
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
def testWhileCondGrad_UnknownShape(self):
with self.test_session() as sess:
v = array_ops.placeholder(dtypes.float32)
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
def testWhileGrad_Concat(self):
with self.test_session() as sess:
x = variable_scope.get_variable("x", initializer=[[1., 2.]])
i0 = constant_op.constant(0)
h0 = array_ops.zeros([0, 2])
def condition(i, _):
return i < 2
def body(i, h):
return i + 1, array_ops.concat([h, x], 0)
_, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
s = math_ops.reduce_sum(h)
sess.run(variables.global_variables_initializer())
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
op = optimizer.minimize(s)
sess.run(op)
self.assertAllClose([[0.98000002, 1.98000002]], sess.run(x))
def testWhileWithRefsWithGradients_1(self):
with self.test_session() as sess:
x = variables.Variable(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 10)
self.assertEqual(x.dtype, dtypes.int32_ref)
# pylint: disable=protected-access
def body(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return [i + 1, gen_array_ops._ref_identity(x)]
# pylint: enable=protected-access
r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [variables.Variable(73)._ref()] # pylint: disable=protected-access
grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
def testWhileGrad_IndexedSlices(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1, ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testWhileGrad_SparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1, sparse_tensor.SparseTensor(x.indices, x.values * 2.0,
x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testCallGradInLoop(self):
with self.test_session() as sess:
i0 = constant_op.constant(0)
params = constant_op.constant(5.0)
params_1 = math_ops.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = constant_op.constant([1.0, 2.0, 3.0])
data = math_ops.multiply(data, params_1)
x1 = x + gradients_impl.gradients(data, params)[0]
return i + 1, x1
output_grad = control_flow_ops.while_loop(c, b,
[i0, constant_op.constant(0.0)])
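      # Each iteration adds d(sum([1, 2, 3] * params**2))/d(params)
      # = 6 * 2 * 5 = 60 to the accumulator, so after 10 iterations it is 600.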
self.assertAllClose(600.0, sess.run(output_grad)[1])
def testWhileAndTensorArray(self):
with self.test_session() as sess:
param = constant_op.constant(2.0)
n0 = constant_op.constant(0)
y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
def c(i, _):
return i < 10
def b(i, y):
return [
i + 1,
functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y)
]
r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
r = gradients_impl.gradients(r, param)[0]
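      # y is multiplied by param ten times, so sum(y_final) = sum(y0) * param**10
      # and its gradient is sum(y0) * 10 * param**9 = 21 * 10 * 512 = 107520.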
self.assertAllClose(107520.0, sess.run(r))
def testWhileGrad_StopGrad(self):
with self.test_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.square(y)
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, r.eval())
r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.square(rx)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, r.eval())
def testWhileGrad_StopGradInside(self):
with self.test_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, r.eval())
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, r.eval())
def testWhileGrad_StopGradInsideNoShape(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))
name = "gradients/while/stopped_grad"
all_ops = x.graph.get_operations()
self.assertFalse(any([name in op.name for op in all_ops]))
def testWhileGradGradFail(self):
theta = variables.Variable(initial_value=1.)
def fn(prev, x):
return prev + x * theta
result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
grad_theta = gradients_impl.gradients(result, theta)
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
gradients_impl.gradients(grad_theta, theta)
grad_theta_stopped = array_ops.stop_gradient(grad_theta)
gradients_impl.gradients(grad_theta_stopped, theta)
def testStopGradOnWhileGrad(self):
with self.test_session():
x = constant_op.constant(2.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x: math_ops.less(x, 100.0)
b = lambda x: math_ops.multiply(x, y)
rx = control_flow_ops.while_loop(c, b, [x])
rg = gradients_impl.gradients(rx, y)[0]
rg = array_ops.stop_gradient(rg)
r = math_ops.add(math_ops.square(y), rx)
r = math_ops.add(r, rg)
r = gradients_impl.gradients(r, y)[0]
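      # rx = x * y**6, so drx/dy = 6 * x * y**5 = 384; rg is wrapped in
      # stop_gradient, so dr/dy = 2 * y + 384 = 388.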
self.assertEqual(388.0, r.eval())
def testOneValueCond(self):
with self.test_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
one = ops.convert_to_tensor(1, name="one")
two = ops.convert_to_tensor(2, name="two")
p = math_ops.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, ops.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
def testExampleCond(self):
with self.test_session():
x = ops.convert_to_tensor([-2.0, 2.0], name="x")
d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
return math_ops.reduce_sum(math_ops.abs(x))
i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testCase(self):
with self.test_session():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
{
x < y: f1,
x > z: f2
}, default=f3, exclusive=True)
self.assertAllEqual(r1.eval(), 17)
r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2.eval(), 23)
      # Duplicate events can happen; the first one is selected
r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3.eval(), 17)
# Duplicate events cause an error if exclusive = True
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError(
"More than one condition evaluated as True but exclusive=True."):
r4.eval()
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5.eval(), -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return constant_op.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = control_flow_ops.case(
[(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: constant_op.constant(2))
self.assertAllEqual(r6.eval(), 0)
def testCaseSideEffects(self):
with self.test_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
      a = lambda: control_flow_ops.with_dependencies(
          [state_ops.assign(v0, 0)], 0)
      b = lambda: control_flow_ops.with_dependencies(
          [state_ops.assign(v1, 1)], 1)
      c = lambda: control_flow_ops.with_dependencies(
          [state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, r2.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, r1.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, r0.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
def testOneOpCond(self):
with self.test_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(0, v.eval())
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
with self.test_session() as sess:
v = variables.Variable(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = sess.run([c, real_v])
      # Ensure the fetched value of 'c' is still 10 when run alongside 'real_v'
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
def testWithTensorDependencies(self):
with self.test_session():
v = variables.Variable(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v.eval()
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
with self.test_session():
v = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
with ops.Graph().as_default():
# device set on tensor => same device on dep.
with ops.device("/job:ps"):
vd = variables.Variable([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = variables.Variable([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = variables.Variable([0.0], name="vdef")
with ops.device("/job:worker/gpu:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
def testGroup(self):
with self.test_session() as sess:
v1 = variables.Variable([0.0])
v2 = variables.Variable([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = sess.run([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
def testMergeShapes(self):
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.Variable(p1, validate_shape=False)
v2 = variables.Variable(p2, validate_shape=False)
v3 = variables.Variable(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.Variable([[1, 2]])
v2 = variables.Variable([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.Variable([[1, 2]])
v2 = variables.Variable([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.Variable([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.Variable(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
def testRunLoopTensor(self):
with self.test_session() as sess:
tensor_list = []
def condition(t):
return t < constant_op.constant(5)
def body(_):
tensor_list.append(constant_op.constant(5))
return constant_op.constant(10)
result = control_flow_ops.while_loop(condition, body,
[constant_op.constant(4)])
self.assertEqual(10, sess.run(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.test_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.test_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
r = gradients_impl.gradients(r, x)[0]
self.assertEqual(r.eval(), 524288.0)
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "Stack"]), 1)
class TupleTest(test.TestCase):
def testTensors(self):
for v1_first in [True, False]:
with self.test_session():
v1 = variables.Variable([1.0])
add1 = math_ops.add(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
2.0)
v2 = variables.Variable([10.0])
add2 = math_ops.add(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.test_session():
v1 = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
v2 = variables.Variable(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = array_ops.gather(st1.values, st1.indices)
g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
def testAcceptTensorsAsControlInputs(self):
with self.test_session():
var = variables.Variable(0)
assign = state_ops.assign(var, 1)
t, = control_flow_ops.tuple(
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
      self.assertEqual(1, var.eval())
class AssertTest(test.TestCase):
def testGuardedAssertDoesNotCopyWhenTrue(self):
with self.test_session(use_gpu=True) as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
if __name__ == "__main__":
test.main()
| apache-2.0 | 7,649,208,543,106,619,000 | 35.216054 | 112 | 0.604876 | false |
rahulunair/nova | nova/tests/unit/network/test_network_info.py | 1 | 49810 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova.network import model
from nova import objects
from nova import test
from nova.tests.unit import fake_network_cache_model
from nova.virt import netutils
class RouteTests(test.NoDBTestCase):
def test_create_route_with_attrs(self):
route = fake_network_cache_model.new_route()
fake_network_cache_model.new_ip(dict(address='192.168.1.1'))
self.assertEqual('0.0.0.0/24', route['cidr'])
self.assertEqual('192.168.1.1', route['gateway']['address'])
self.assertEqual('eth0', route['interface'])
def test_routes_equal(self):
route1 = model.Route()
route2 = model.Route()
self.assertEqual(route1, route2)
def test_routes_not_equal(self):
route1 = model.Route(cidr='1.1.1.0/24')
route2 = model.Route(cidr='2.2.2.0/24')
self.assertNotEqual(route1, route2)
route1 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.1')
route2 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.2')
self.assertNotEqual(route1, route2)
route1 = model.Route(cidr='1.1.1.1/24', interface='tap0')
route2 = model.Route(cidr='1.1.1.1/24', interface='tap1')
self.assertNotEqual(route1, route2)
def test_hydrate(self):
route = model.Route.hydrate(
{'gateway': fake_network_cache_model.new_ip(
dict(address='192.168.1.1'))})
self.assertIsNone(route['cidr'])
self.assertEqual('192.168.1.1', route['gateway']['address'])
self.assertIsNone(route['interface'])
class IPTests(test.NoDBTestCase):
def test_ip_equal(self):
ip1 = model.IP(address='127.0.0.1')
ip2 = model.IP(address='127.0.0.1')
self.assertEqual(ip1, ip2)
def test_ip_not_equal(self):
ip1 = model.IP(address='127.0.0.1')
ip2 = model.IP(address='172.0.0.3')
self.assertNotEqual(ip1, ip2)
ip1 = model.IP(address='127.0.0.1', type=1)
ip2 = model.IP(address='172.0.0.1', type=2)
self.assertNotEqual(ip1, ip2)
ip1 = model.IP(address='127.0.0.1', version=4)
ip2 = model.IP(address='172.0.0.1', version=6)
self.assertNotEqual(ip1, ip2)
class FixedIPTests(test.NoDBTestCase):
def test_createnew_fixed_ip_with_attrs(self):
fixed_ip = model.FixedIP(address='192.168.1.100')
self.assertEqual('192.168.1.100', fixed_ip['address'])
self.assertEqual([], fixed_ip['floating_ips'])
self.assertEqual('fixed', fixed_ip['type'])
self.assertEqual(4, fixed_ip['version'])
def test_create_fixed_ipv6(self):
fixed_ip = model.FixedIP(address='::1')
self.assertEqual('::1', fixed_ip['address'])
self.assertEqual([], fixed_ip['floating_ips'])
self.assertEqual('fixed', fixed_ip['type'])
self.assertEqual(6, fixed_ip['version'])
def test_create_fixed_bad_ip_fails(self):
self.assertRaises(exception.InvalidIpAddressError,
model.FixedIP,
address='picklespicklespickles')
def test_equate_two_fixed_ips(self):
fixed_ip = model.FixedIP(address='::1')
fixed_ip2 = model.FixedIP(address='::1')
self.assertEqual(fixed_ip, fixed_ip2)
def test_equate_two_dissimilar_fixed_ips_fails(self):
fixed_ip = model.FixedIP(address='::1')
fixed_ip2 = model.FixedIP(address='::2')
self.assertNotEqual(fixed_ip, fixed_ip2)
fixed_ip = model.FixedIP(address='::1', type='1')
fixed_ip2 = model.FixedIP(address='::1', type='2')
self.assertNotEqual(fixed_ip, fixed_ip2)
fixed_ip = model.FixedIP(address='::1', version='6')
fixed_ip2 = model.FixedIP(address='::1', version='4')
self.assertNotEqual(fixed_ip, fixed_ip2)
fixed_ip = model.FixedIP(address='::1', floating_ips='1.1.1.1')
fixed_ip2 = model.FixedIP(address='::1', floating_ips='8.8.8.8')
self.assertNotEqual(fixed_ip, fixed_ip2)
def test_hydrate(self):
fixed_ip = model.FixedIP.hydrate({})
self.assertEqual([], fixed_ip['floating_ips'])
self.assertIsNone(fixed_ip['address'])
self.assertEqual('fixed', fixed_ip['type'])
self.assertIsNone(fixed_ip['version'])
def test_add_floating_ip(self):
fixed_ip = model.FixedIP(address='192.168.1.100')
fixed_ip.add_floating_ip('192.168.1.101')
self.assertEqual(['192.168.1.101'], fixed_ip['floating_ips'])
def test_add_floating_ip_repeatedly_only_one_instance(self):
fixed_ip = model.FixedIP(address='192.168.1.100')
for i in range(10):
fixed_ip.add_floating_ip('192.168.1.101')
self.assertEqual(['192.168.1.101'], fixed_ip['floating_ips'])
class SubnetTests(test.NoDBTestCase):
def test_create_subnet_with_attrs(self):
subnet = fake_network_cache_model.new_subnet()
route1 = fake_network_cache_model.new_route()
self.assertEqual('10.10.0.0/24', subnet['cidr'])
self.assertEqual(
[fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
fake_network_cache_model.new_ip(dict(address='2.3.4.5'))],
subnet['dns'])
self.assertEqual('10.10.0.1', subnet['gateway']['address'])
self.assertEqual(
[fake_network_cache_model.new_fixed_ip(
dict(address='10.10.0.2')),
fake_network_cache_model.new_fixed_ip(
dict(address='10.10.0.3'))], subnet['ips'])
self.assertEqual([route1], subnet['routes'])
self.assertEqual(4, subnet['version'])
def test_subnet_equal(self):
subnet1 = fake_network_cache_model.new_subnet()
subnet2 = fake_network_cache_model.new_subnet()
self.assertEqual(subnet1, subnet2)
def test_subnet_not_equal(self):
subnet1 = model.Subnet(cidr='1.1.1.0/24')
subnet2 = model.Subnet(cidr='2.2.2.0/24')
self.assertNotEqual(subnet1, subnet2)
subnet1 = model.Subnet(dns='1.1.1.0/24')
subnet2 = model.Subnet(dns='2.2.2.0/24')
self.assertNotEqual(subnet1, subnet2)
subnet1 = model.Subnet(gateway='1.1.1.1/24')
subnet2 = model.Subnet(gateway='2.2.2.1/24')
self.assertNotEqual(subnet1, subnet2)
subnet1 = model.Subnet(ips='1.1.1.0/24')
subnet2 = model.Subnet(ips='2.2.2.0/24')
self.assertNotEqual(subnet1, subnet2)
subnet1 = model.Subnet(routes='1.1.1.0/24')
subnet2 = model.Subnet(routes='2.2.2.0/24')
self.assertNotEqual(subnet1, subnet2)
subnet1 = model.Subnet(version='4')
subnet2 = model.Subnet(version='6')
self.assertNotEqual(subnet1, subnet2)
def test_add_route(self):
subnet = fake_network_cache_model.new_subnet()
route1 = fake_network_cache_model.new_route()
route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
subnet.add_route(route2)
self.assertEqual([route1, route2], subnet['routes'])
def test_add_route_a_lot(self):
subnet = fake_network_cache_model.new_subnet()
route1 = fake_network_cache_model.new_route()
route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
for i in range(10):
subnet.add_route(route2)
self.assertEqual([route1, route2], subnet['routes'])
def test_add_dns(self):
subnet = fake_network_cache_model.new_subnet()
dns = fake_network_cache_model.new_ip(dict(address='9.9.9.9'))
subnet.add_dns(dns)
self.assertEqual(
[fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
fake_network_cache_model.new_ip(dict(address='9.9.9.9'))],
subnet['dns'])
def test_add_dns_a_lot(self):
subnet = fake_network_cache_model.new_subnet()
for i in range(10):
subnet.add_dns(fake_network_cache_model.new_ip(
dict(address='9.9.9.9')))
self.assertEqual(
[fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
fake_network_cache_model.new_ip(dict(address='9.9.9.9'))],
subnet['dns'])
def test_add_ip(self):
subnet = fake_network_cache_model.new_subnet()
subnet.add_ip(fake_network_cache_model.new_ip(
dict(address='192.168.1.102')))
self.assertEqual(
[fake_network_cache_model.new_fixed_ip(
dict(address='10.10.0.2')),
fake_network_cache_model.new_fixed_ip(
dict(address='10.10.0.3')),
fake_network_cache_model.new_ip(
dict(address='192.168.1.102'))], subnet['ips'])
def test_add_ip_a_lot(self):
subnet = fake_network_cache_model.new_subnet()
for i in range(10):
subnet.add_ip(fake_network_cache_model.new_fixed_ip(
dict(address='192.168.1.102')))
self.assertEqual(
[fake_network_cache_model.new_fixed_ip(
dict(address='10.10.0.2')),
fake_network_cache_model.new_fixed_ip(
dict(address='10.10.0.3')),
fake_network_cache_model.new_fixed_ip(
dict(address='192.168.1.102'))], subnet['ips'])
def test_hydrate(self):
subnet_dict = {
'cidr': '255.255.255.0',
'dns': [fake_network_cache_model.new_ip(dict(address='1.1.1.1'))],
'ips': [fake_network_cache_model.new_fixed_ip(
dict(address='2.2.2.2'))],
'routes': [fake_network_cache_model.new_route()],
'version': 4,
'gateway': fake_network_cache_model.new_ip(
dict(address='3.3.3.3'))}
subnet = model.Subnet.hydrate(subnet_dict)
self.assertEqual('255.255.255.0', subnet['cidr'])
self.assertEqual([fake_network_cache_model.new_ip(
dict(address='1.1.1.1'))], subnet['dns'])
self.assertEqual('3.3.3.3', subnet['gateway']['address'])
self.assertEqual([fake_network_cache_model.new_fixed_ip(
dict(address='2.2.2.2'))], subnet['ips'])
self.assertEqual([fake_network_cache_model.new_route()],
subnet['routes'])
self.assertEqual(4, subnet['version'])
class NetworkTests(test.NoDBTestCase):
def test_create_network(self):
network = fake_network_cache_model.new_network()
self.assertEqual(1, network['id'])
self.assertEqual('br0', network['bridge'])
self.assertEqual('public', network['label'])
self.assertEqual(
[fake_network_cache_model.new_subnet(),
fake_network_cache_model.new_subnet(
dict(cidr='255.255.255.255'))], network['subnets'])
def test_add_subnet(self):
network = fake_network_cache_model.new_network()
network.add_subnet(fake_network_cache_model.new_subnet(
dict(cidr='0.0.0.0')))
self.assertEqual(
[fake_network_cache_model.new_subnet(),
fake_network_cache_model.new_subnet(
dict(cidr='255.255.255.255')),
fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))],
network['subnets'])
def test_add_subnet_a_lot(self):
network = fake_network_cache_model.new_network()
for i in range(10):
network.add_subnet(fake_network_cache_model.new_subnet(
dict(cidr='0.0.0.0')))
self.assertEqual(
[fake_network_cache_model.new_subnet(),
fake_network_cache_model.new_subnet(
dict(cidr='255.255.255.255')),
fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))],
network['subnets'])
def test_network_equal(self):
network1 = model.Network()
network2 = model.Network()
self.assertEqual(network1, network2)
def test_network_not_equal(self):
network1 = model.Network(id='1')
network2 = model.Network(id='2')
self.assertNotEqual(network1, network2)
network1 = model.Network(bridge='br-int')
network2 = model.Network(bridge='br0')
self.assertNotEqual(network1, network2)
network1 = model.Network(label='net1')
network2 = model.Network(label='net2')
self.assertNotEqual(network1, network2)
network1 = model.Network(subnets='1.1.1.0/24')
network2 = model.Network(subnets='2.2.2.0/24')
self.assertNotEqual(network1, network2)
def test_hydrate(self):
fake_network_cache_model.new_subnet()
fake_network_cache_model.new_subnet(dict(cidr='255.255.255.255'))
network = model.Network.hydrate(fake_network_cache_model.new_network())
self.assertEqual(1, network['id'])
self.assertEqual('br0', network['bridge'])
self.assertEqual('public', network['label'])
self.assertEqual(
[fake_network_cache_model.new_subnet(),
fake_network_cache_model.new_subnet(
dict(cidr='255.255.255.255'))], network['subnets'])
class VIFTests(test.NoDBTestCase):
def test_create_vif(self):
vif = fake_network_cache_model.new_vif()
self.assertEqual(1, vif['id'])
self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address'])
self.assertEqual(fake_network_cache_model.new_network(),
vif['network'])
def test_vif_equal(self):
vif1 = model.VIF()
vif2 = model.VIF()
self.assertEqual(vif1, vif2)
def test_vif_not_equal(self):
vif1 = model.VIF(id=1)
vif2 = model.VIF(id=2)
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(address='00:00:00:00:00:11')
vif2 = model.VIF(address='00:00:00:00:00:22')
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(network='net1')
vif2 = model.VIF(network='net2')
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(type='ovs')
vif2 = model.VIF(type='linuxbridge')
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(devname='ovs1234')
vif2 = model.VIF(devname='linuxbridge1234')
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(qbh_params=1)
vif2 = model.VIF(qbh_params=None)
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(qbg_params=1)
vif2 = model.VIF(qbg_params=None)
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(active=True)
vif2 = model.VIF(active=False)
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(vnic_type=model.VNIC_TYPE_NORMAL)
vif2 = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT)
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(profile={'pci_slot': '0000:0a:00.1'})
vif2 = model.VIF(profile={'pci_slot': '0000:0a:00.2'})
self.assertNotEqual(vif1, vif2)
vif1 = model.VIF(preserve_on_delete=True)
vif2 = model.VIF(preserve_on_delete=False)
self.assertNotEqual(vif1, vif2)
def test_create_vif_with_type(self):
vif_dict = dict(
id=1,
address='aa:aa:aa:aa:aa:aa',
network=fake_network_cache_model.new_network(),
type='bridge')
vif = fake_network_cache_model.new_vif(vif_dict)
self.assertEqual(1, vif['id'])
self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address'])
self.assertEqual('bridge', vif['type'])
self.assertEqual(fake_network_cache_model.new_network(),
vif['network'])
def test_vif_get_fixed_ips(self):
vif = fake_network_cache_model.new_vif()
fixed_ips = vif.fixed_ips()
ips = [
fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.2')),
fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.3'))
] * 2
self.assertEqual(fixed_ips, ips)
def test_vif_get_fixed_ips_network_is_none(self):
vif = model.VIF()
fixed_ips = vif.fixed_ips()
self.assertEqual([], fixed_ips)
def test_vif_get_floating_ips(self):
vif = fake_network_cache_model.new_vif()
vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
floating_ips = vif.floating_ips()
self.assertEqual(['192.168.1.1'], floating_ips)
def test_vif_get_labeled_ips(self):
vif = fake_network_cache_model.new_vif()
labeled_ips = vif.labeled_ips()
ip_dict = {
'network_id': 1,
'ips': [fake_network_cache_model.new_ip(
{'address': '10.10.0.2', 'type': 'fixed'}),
fake_network_cache_model.new_ip(
{'address': '10.10.0.3', 'type': 'fixed'})] * 2,
'network_label': 'public'}
self.assertEqual(ip_dict, labeled_ips)
def test_hydrate(self):
fake_network_cache_model.new_network()
vif = model.VIF.hydrate(fake_network_cache_model.new_vif())
self.assertEqual(1, vif['id'])
self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address'])
self.assertEqual(fake_network_cache_model.new_network(),
vif['network'])
def test_hydrate_vif_with_type(self):
vif_dict = dict(
id=1,
address='aa:aa:aa:aa:aa:aa',
network=fake_network_cache_model.new_network(),
type='bridge')
vif = model.VIF.hydrate(fake_network_cache_model.new_vif(vif_dict))
self.assertEqual(1, vif['id'])
self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address'])
self.assertEqual('bridge', vif['type'])
self.assertEqual(fake_network_cache_model.new_network(),
vif['network'])
class NetworkInfoTests(test.NoDBTestCase):
def test_create_model(self):
ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
fake_network_cache_model.new_vif(
{'address': 'bb:bb:bb:bb:bb:bb'})])
self.assertEqual(
[fake_network_cache_model.new_fixed_ip(
{'address': '10.10.0.2'}),
fake_network_cache_model.new_fixed_ip(
{'address': '10.10.0.3'})] * 4, ninfo.fixed_ips())
def test_create_async_model(self):
def async_wrapper():
return model.NetworkInfo(
[fake_network_cache_model.new_vif(),
fake_network_cache_model.new_vif(
{'address': 'bb:bb:bb:bb:bb:bb'})])
ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
self.assertEqual(
[fake_network_cache_model.new_fixed_ip(
{'address': '10.10.0.2'}),
fake_network_cache_model.new_fixed_ip(
{'address': '10.10.0.3'})] * 4, ninfo.fixed_ips())
def test_create_async_model_exceptions(self):
def async_wrapper():
raise test.TestingException()
ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
self.assertRaises(test.TestingException, ninfo.wait)
# 2nd one doesn't raise
self.assertIsNone(ninfo.wait())
# Test that do_raise=False works on .wait()
ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
self.assertIsNone(ninfo.wait(do_raise=False))
# Test we also raise calling a method
ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
self.assertRaises(test.TestingException, ninfo.fixed_ips)
def test_get_floating_ips(self):
vif = fake_network_cache_model.new_vif()
vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
ninfo = model.NetworkInfo([vif,
fake_network_cache_model.new_vif(
{'address': 'bb:bb:bb:bb:bb:bb'})])
self.assertEqual(['192.168.1.1'], ninfo.floating_ips())
def test_hydrate(self):
ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
fake_network_cache_model.new_vif(
{'address': 'bb:bb:bb:bb:bb:bb'})])
model.NetworkInfo.hydrate(ninfo)
self.assertEqual(
[fake_network_cache_model.new_fixed_ip(
{'address': '10.10.0.2'}),
fake_network_cache_model.new_fixed_ip(
{'address': '10.10.0.3'})] * 4, ninfo.fixed_ips())
def _setup_injected_network_scenario(self, should_inject=True,
use_ipv4=True, use_ipv6=False,
gateway=True, dns=True,
two_interfaces=False,
libvirt_virt_type=None):
"""Check that netutils properly decides whether to inject based on
whether the supplied subnet is static or dynamic.
"""
network = fake_network_cache_model.new_network({'subnets': []})
subnet_dict = {}
if not gateway:
subnet_dict['gateway'] = None
if not dns:
subnet_dict['dns'] = None
if not should_inject:
subnet_dict['dhcp_server'] = '10.10.0.1'
if use_ipv4:
network.add_subnet(
fake_network_cache_model.new_subnet(subnet_dict))
if should_inject and use_ipv6:
gateway_ip = fake_network_cache_model.new_ip(dict(
address='1234:567::1'))
ip = fake_network_cache_model.new_ip(dict(
address='1234:567::2'))
ipv6_subnet_dict = dict(
cidr='1234:567::/48',
gateway=gateway_ip,
dns=[fake_network_cache_model.new_ip(
dict(address='2001:4860:4860::8888')),
fake_network_cache_model.new_ip(
dict(address='2001:4860:4860::8844'))],
ips=[ip])
if not gateway:
ipv6_subnet_dict['gateway'] = None
network.add_subnet(fake_network_cache_model.new_subnet(
ipv6_subnet_dict))
# Behave as though CONF.flat_injected is True
network['meta']['injected'] = True
vif = fake_network_cache_model.new_vif({'network': network})
vifs = [vif]
if two_interfaces:
vifs.append(vif)
nwinfo = model.NetworkInfo(vifs)
return netutils.get_injected_network_template(
nwinfo, libvirt_virt_type=libvirt_virt_type)
def test_injection_dynamic(self):
expected = None
template = self._setup_injected_network_scenario(should_inject=False)
self.assertEqual(expected, template)
def test_injection_static(self):
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
"""
template = self._setup_injected_network_scenario()
self.assertEqual(expected, template)
def test_injection_static_no_gateway(self):
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
"""
template = self._setup_injected_network_scenario(gateway=False)
self.assertEqual(expected, template)
def test_injection_static_no_dns(self):
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
"""
template = self._setup_injected_network_scenario(dns=False)
self.assertEqual(expected, template)
def test_injection_static_overridden_template(self):
cfg.CONF.set_override(
'injected_network_template',
'nova/tests/unit/network/interfaces-override.template')
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip route add 0.0.0.0/24 via 192.168.1.1 dev eth0
pre-down ip route del 0.0.0.0/24 via 192.168.1.1 dev eth0
"""
template = self._setup_injected_network_scenario()
self.assertEqual(expected, template)
def test_injection_static_ipv6(self):
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
iface eth0 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
gateway 1234:567::1
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
"""
template = self._setup_injected_network_scenario(use_ipv6=True)
self.assertEqual(expected, template)
def test_injection_static_ipv6_no_gateway(self):
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
iface eth0 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
"""
template = self._setup_injected_network_scenario(use_ipv6=True,
gateway=False)
self.assertEqual(expected, template)
def test_injection_static_with_ipv4_off(self):
expected = None
template = self._setup_injected_network_scenario(use_ipv4=False)
self.assertEqual(expected, template)
def test_injection_ipv6_two_interfaces(self):
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
iface eth0 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
gateway 1234:567::1
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
auto eth1
iface eth1 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
iface eth1 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
gateway 1234:567::1
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
"""
template = self._setup_injected_network_scenario(use_ipv6=True,
two_interfaces=True)
self.assertEqual(expected, template)
def test_injection_ipv6_with_lxc(self):
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
auto eth1
iface eth1 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
"""
template = self._setup_injected_network_scenario(
use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc')
self.assertEqual(expected, template)
def test_injection_ipv6_with_lxc_no_gateway(self):
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
auto eth1
iface eth1 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
"""
template = self._setup_injected_network_scenario(
use_ipv6=True, gateway=False, two_interfaces=True,
libvirt_virt_type='lxc')
self.assertEqual(expected, template)
def test_get_events(self):
network_info = model.NetworkInfo([
model.VIF(
id=uuids.hybrid_vif,
details={'ovs_hybrid_plug': True}),
model.VIF(
id=uuids.normal_vif,
details={'ovs_hybrid_plug': False})])
same_host = objects.Migration(source_compute='fake-host',
dest_compute='fake-host')
diff_host = objects.Migration(source_compute='fake-host1',
dest_compute='fake-host2')
# Same-host migrations will have all events be plug-time.
self.assertItemsEqual(
[('network-vif-plugged', uuids.normal_vif),
('network-vif-plugged', uuids.hybrid_vif)],
network_info.get_plug_time_events(same_host))
        # Same-host migrations will have no bind-time events.
self.assertEqual([], network_info.get_bind_time_events(same_host))
# Diff-host migration + OVS hybrid plug = bind-time events
self.assertEqual(
[('network-vif-plugged', uuids.hybrid_vif)],
network_info.get_bind_time_events(diff_host))
# Diff-host migration + normal OVS = plug-time events
self.assertEqual(
[('network-vif-plugged', uuids.normal_vif)],
network_info.get_plug_time_events(diff_host))
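    # Editor's summary, not part of the original test suite: the rule
    # exercised above is that a VIF with ovs_hybrid_plug=True sends its
    # network-vif-plugged event at port-binding time for cross-host
    # migrations, while every other case sends it at plug time.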
def test_has_port_with_allocation(self):
network_info = model.NetworkInfo([])
self.assertFalse(network_info.has_port_with_allocation())
network_info.append(
model.VIF(id=uuids.port_without_profile))
self.assertFalse(network_info.has_port_with_allocation())
network_info.append(
model.VIF(id=uuids.port_no_allocation, profile={'foo': 'bar'}))
self.assertFalse(network_info.has_port_with_allocation())
network_info.append(
model.VIF(
id=uuids.port_empty_alloc, profile={'allocation': None}))
self.assertFalse(network_info.has_port_with_allocation())
network_info.append(
model.VIF(
id=uuids.port_with_alloc, profile={'allocation': uuids.rp}))
self.assertTrue(network_info.has_port_with_allocation())
class TestNetworkMetadata(test.NoDBTestCase):
def setUp(self):
super(TestNetworkMetadata, self).setUp()
self.netinfo = self._new_netinfo()
def _new_netinfo(self, vif_type='ethernet'):
netinfo = model.NetworkInfo([fake_network_cache_model.new_vif(
{'type': vif_type})])
        # Give this vif an ipv4 and an ipv6 subnet
ipv4_subnet = fake_network_cache_model.new_subnet(version=4)
ipv6_subnet = fake_network_cache_model.new_subnet(version=6)
netinfo[0]['network']['subnets'][0] = ipv4_subnet
netinfo[0]['network']['subnets'][1] = ipv6_subnet
netinfo[0]['network']['meta']['mtu'] = 1500
return netinfo
def test_get_network_metadata_json(self):
net_metadata = netutils.get_network_metadata(self.netinfo)
# Physical Ethernet
self.assertEqual(
{
'id': 'interface0',
'type': 'phy',
'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa',
'vif_id': 1,
'mtu': 1500
},
net_metadata['links'][0])
# IPv4 Network
self.assertEqual(
{
'id': 'network0',
'link': 'interface0',
'type': 'ipv4',
'ip_address': '10.10.0.2',
'netmask': '255.255.255.0',
'routes': [
{
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'gateway': '10.10.0.1'
},
{
'network': '0.0.0.0',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'
}
],
'services': [{'address': '1.2.3.4', 'type': 'dns'},
{'address': '2.3.4.5', 'type': 'dns'}],
'network_id': 1
},
net_metadata['networks'][0])
self.assertEqual(
{
'id': 'network1',
'link': 'interface0',
'type': 'ipv6',
'ip_address': 'fd00::2',
'netmask': 'ffff:ffff:ffff::',
'routes': [
{
'network': '::',
'netmask': '::',
'gateway': 'fd00::1'
},
{
'network': '::',
'netmask': 'ffff:ffff:ffff::',
'gateway': 'fd00::1:1'
}
],
'services': [{'address': '1:2:3:4::', 'type': 'dns'},
{'address': '2:3:4:5::', 'type': 'dns'}],
'network_id': 1
},
net_metadata['networks'][1])
def test_get_network_metadata_json_dhcp(self):
ipv4_subnet = fake_network_cache_model.new_subnet(
subnet_dict=dict(dhcp_server='1.1.1.1'), version=4)
ipv6_subnet = fake_network_cache_model.new_subnet(
subnet_dict=dict(dhcp_server='1234:567::'), version=6)
self.netinfo[0]['network']['subnets'][0] = ipv4_subnet
self.netinfo[0]['network']['subnets'][1] = ipv6_subnet
net_metadata = netutils.get_network_metadata(self.netinfo)
# IPv4 Network
self.assertEqual(
{
'id': 'network0',
'link': 'interface0',
'type': 'ipv4_dhcp',
'network_id': 1
},
net_metadata['networks'][0])
# IPv6 Network
self.assertEqual(
{
'id': 'network1',
'link': 'interface0',
'type': 'ipv6_dhcp',
'network_id': 1
},
net_metadata['networks'][1])
def _test_get_network_metadata_json_ipv6_addr_mode(self, mode):
ipv6_subnet = fake_network_cache_model.new_subnet(
subnet_dict=dict(dhcp_server='1234:567::',
ipv6_address_mode=mode), version=6)
self.netinfo[0]['network']['subnets'][1] = ipv6_subnet
net_metadata = netutils.get_network_metadata(self.netinfo)
self.assertEqual(
{
'id': 'network1',
'link': 'interface0',
'ip_address': 'fd00::2',
'netmask': 'ffff:ffff:ffff::',
'routes': [
{
'network': '::',
'netmask': '::',
'gateway': 'fd00::1'
},
{
'network': '::',
'netmask': 'ffff:ffff:ffff::',
'gateway': 'fd00::1:1'
}
],
'services': [
{'address': '1:2:3:4::', 'type': 'dns'},
{'address': '2:3:4:5::', 'type': 'dns'}
],
'type': 'ipv6_%s' % mode,
'network_id': 1
},
net_metadata['networks'][1])
def test_get_network_metadata_json_ipv6_addr_mode_slaac(self):
self._test_get_network_metadata_json_ipv6_addr_mode('slaac')
def test_get_network_metadata_json_ipv6_addr_mode_stateful(self):
self._test_get_network_metadata_json_ipv6_addr_mode('dhcpv6-stateful')
def test_get_network_metadata_json_ipv6_addr_mode_stateless(self):
self._test_get_network_metadata_json_ipv6_addr_mode('dhcpv6-stateless')
def test__get_nets(self):
expected_net = {
'id': 'network0',
'ip_address': '10.10.0.2',
'link': 1,
'netmask': '255.255.255.0',
'network_id': 1,
'routes': [
{
'gateway': '10.10.0.1',
'netmask': '0.0.0.0',
'network': '0.0.0.0'},
{
'gateway': '192.168.1.1',
'netmask': '255.255.255.0',
'network': '0.0.0.0'}],
'services': [
{'address': '1.2.3.4', 'type': 'dns'},
{'address': '2.3.4.5', 'type': 'dns'}
],
'type': 'ipv4'
}
net = netutils._get_nets(
self.netinfo[0], self.netinfo[0]['network']['subnets'][0], 4, 0, 1)
self.assertEqual(expected_net, net)
def test__get_eth_link(self):
expected_link = {
'id': 'interface0',
'vif_id': 1,
'type': 'vif',
'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa',
'mtu': 1500
}
self.netinfo[0]['type'] = 'vif'
link = netutils._get_eth_link(self.netinfo[0], 0)
self.assertEqual(expected_link, link)
def test__get_eth_link_physical(self):
expected_link = {
'id': 'interface1',
'vif_id': 1,
'type': 'phy',
'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa',
'mtu': 1500
}
link = netutils._get_eth_link(self.netinfo[0], 1)
self.assertEqual(expected_link, link)
def test__get_default_route(self):
v4_expected = [{
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'gateway': '10.10.0.1',
}]
v6_expected = [{
'network': '::',
'netmask': '::',
'gateway': 'fd00::1'
}]
v4 = netutils._get_default_route(
4, self.netinfo[0]['network']['subnets'][0])
self.assertEqual(v4_expected, v4)
v6 = netutils._get_default_route(
6, self.netinfo[0]['network']['subnets'][1])
self.assertEqual(v6_expected, v6)
# Test for no gateway
self.netinfo[0]['network']['subnets'][0]['gateway'] = None
no_route = netutils._get_default_route(
4, self.netinfo[0]['network']['subnets'][0])
self.assertEqual([], no_route)
def test__get_dns_services(self):
expected_dns = [
{'type': 'dns', 'address': '1.2.3.4'},
{'type': 'dns', 'address': '2.3.4.5'},
{'type': 'dns', 'address': '3.4.5.6'}
]
subnet = fake_network_cache_model.new_subnet(version=4)
subnet['dns'].append(fake_network_cache_model.new_ip(
{'address': '3.4.5.6'}))
dns = netutils._get_dns_services(subnet)
self.assertEqual(expected_dns, dns)
def test_get_network_metadata(self):
expected_json = {
"links": [
{
"ethernet_mac_address": "aa:aa:aa:aa:aa:aa",
"id": "interface0",
"type": "phy",
"vif_id": 1,
"mtu": 1500
},
{
"ethernet_mac_address": "aa:aa:aa:aa:aa:ab",
"id": "interface1",
"type": "phy",
"vif_id": 1,
"mtu": 1500
},
],
"networks": [
{
"id": "network0",
"ip_address": "10.10.0.2",
"link": "interface0",
"netmask": "255.255.255.0",
"network_id":
"00000000-0000-0000-0000-000000000000",
"routes": [
{
"gateway": "10.10.0.1",
"netmask": "0.0.0.0",
"network": "0.0.0.0"
},
{
"gateway": "192.168.1.1",
"netmask": "255.255.255.0",
"network": "0.0.0.0"
}
],
'services': [{'address': '1.2.3.4', 'type': 'dns'},
{'address': '2.3.4.5', 'type': 'dns'}],
"type": "ipv4"
},
{
'id': 'network1',
'ip_address': 'fd00::2',
'link': 'interface0',
'netmask': 'ffff:ffff:ffff::',
'network_id': '00000000-0000-0000-0000-000000000000',
'routes': [{'gateway': 'fd00::1',
'netmask': '::',
'network': '::'},
{'gateway': 'fd00::1:1',
'netmask': 'ffff:ffff:ffff::',
'network': '::'}],
'services': [{'address': '1:2:3:4::', 'type': 'dns'},
{'address': '2:3:4:5::', 'type': 'dns'}],
'type': 'ipv6'
},
{
"id": "network2",
"ip_address": "192.168.0.2",
"link": "interface1",
"netmask": "255.255.255.0",
"network_id":
"11111111-1111-1111-1111-111111111111",
"routes": [
{
"gateway": "192.168.0.1",
"netmask": "0.0.0.0",
"network": "0.0.0.0"
}
],
'services': [{'address': '1.2.3.4', 'type': 'dns'},
{'address': '2.3.4.5', 'type': 'dns'}],
"type": "ipv4"
}
],
'services': [
{'address': '1.2.3.4', 'type': 'dns'},
{'address': '2.3.4.5', 'type': 'dns'},
{'address': '1:2:3:4::', 'type': 'dns'},
{'address': '2:3:4:5::', 'type': 'dns'}
]
}
self.netinfo[0]['network']['id'] = (
'00000000-0000-0000-0000-000000000000')
# Add a second NIC
self.netinfo.append(fake_network_cache_model.new_vif({
'type': 'ethernet', 'address': 'aa:aa:aa:aa:aa:ab'}))
address = fake_network_cache_model.new_ip({'address': '192.168.0.2'})
gateway_address = fake_network_cache_model.new_ip(
{'address': '192.168.0.1'})
ipv4_subnet = fake_network_cache_model.new_subnet(
{'cidr': '192.168.0.0/24', 'gateway': gateway_address,
'ips': [address], 'routes': []})
self.netinfo[1]['network']['id'] = (
'11111111-1111-1111-1111-111111111111')
self.netinfo[1]['network']['subnets'][0] = ipv4_subnet
self.netinfo[1]['network']['meta']['mtu'] = 1500
network_json = netutils.get_network_metadata(self.netinfo)
self.assertEqual(expected_json, network_json)
def test_get_network_metadata_no_ipv4(self):
expected_json = {
"services": [
{
"type": "dns",
"address": "1:2:3:4::"
},
{
"type": "dns",
"address": "2:3:4:5::"
}
],
"networks": [
{
"network_id": 1,
"type": "ipv6",
"netmask": "ffff:ffff:ffff::",
"link": "interface0",
"routes": [
{
"netmask": "::",
"network": "::",
"gateway": "fd00::1"
},
{
"netmask": "ffff:ffff:ffff::",
"network": "::",
"gateway": "fd00::1:1"
}
],
'services': [{'address': '1:2:3:4::', 'type': 'dns'},
{'address': '2:3:4:5::', 'type': 'dns'}],
"ip_address": "fd00::2",
"id": "network0"
}
],
"links": [
{
"ethernet_mac_address": "aa:aa:aa:aa:aa:aa",
"mtu": 1500,
"type": "phy",
"id": "interface0",
"vif_id": 1
}
]
}
# drop the ipv4 subnet
self.netinfo[0]['network']['subnets'].pop(0)
network_json = netutils.get_network_metadata(self.netinfo)
self.assertEqual(expected_json, network_json)
def test_legacy_vif_types_type_passed_through(self):
legacy_types = [
model.VIF_TYPE_BRIDGE,
model.VIF_TYPE_DVS,
model.VIF_TYPE_HW_VEB,
model.VIF_TYPE_HYPERV,
model.VIF_TYPE_OVS,
model.VIF_TYPE_TAP,
model.VIF_TYPE_VHOSTUSER,
model.VIF_TYPE_VIF,
]
link_types = []
for vif_type in legacy_types:
network_json = netutils.get_network_metadata(
self._new_netinfo(vif_type=vif_type))
link_types.append(network_json["links"][0]["type"])
self.assertEqual(legacy_types, link_types)
def test_new_vif_types_get_type_phy(self):
new_types = ["whizbang_nvf", "vswitch9"]
link_types = []
for vif_type in new_types:
network_json = netutils.get_network_metadata(
self._new_netinfo(vif_type=vif_type))
link_types.append(network_json["links"][0]["type"])
self.assertEqual(["phy"] * len(new_types), link_types)
| apache-2.0 | -1,657,925,334,694,846,000 | 36.033457 | 79 | 0.53678 | false |
HybridF5/jacket | jacket/api/compute/openstack/compute/limits.py | 1 | 2400 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jacket.api.compute.openstack.compute.legacy_v2 import limits
from jacket.api.compute.openstack.compute.views import limits as limits_views
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
from jacket.compute import quota
# NOTE(alex_xu): This is just for keeping backward compatible with v2 endpoint
# in api-paste.ini. This will be removed after v2 API code deprecated in the
# future.
RateLimitingMiddleware = limits.RateLimitingMiddleware
QUOTAS = quota.QUOTAS
ALIAS = 'limits'
authorize = extensions.os_compute_authorizer(ALIAS)
class LimitsController(wsgi.Controller):
"""Controller for accessing limits in the OpenStack API."""
@extensions.expected_errors(())
def index(self, req):
"""Return all global and rate limit information."""
context = req.environ['compute.context']
authorize(context)
project_id = req.params.get('tenant_id', context.project_id)
quotas = QUOTAS.get_project_quotas(context, project_id,
usages=False)
abs_limits = {k: v['limit'] for k, v in quotas.items()}
rate_limits = req.environ.get("compute.limits", [])
builder = self._get_view_builder(req)
return builder.build(rate_limits, abs_limits)
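        # Editor's note, not in the original source: the v2.1 view builder is
        # expected to return a body roughly of the form
        # {"limits": {"rate": [...], "absolute": {...}}}, with the per-project
        # quota values gathered above reported under "absolute".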
def _get_view_builder(self, req):
return limits_views.ViewBuilderV21()
class Limits(extensions.V21APIExtensionBase):
"""Limits support."""
name = "Limits"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
LimitsController())]
return resource
def get_controller_extensions(self):
return []
| apache-2.0 | 5,010,607,489,460,766,000 | 34.820896 | 78 | 0.68625 | false |
Fokko/incubator-airflow | airflow/contrib/example_dags/example_emr_job_flow_automatic_steps.py | 1 | 2363 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an example DAG for an AWS EMR pipeline with automatic steps.
"""
from datetime import timedelta
import airflow
from airflow import DAG
from airflow.contrib.operators.emr_create_job_flow_operator import EmrCreateJobFlowOperator
from airflow.contrib.sensors.emr_job_flow_sensor import EmrJobFlowSensor
DEFAULT_ARGS = {
'owner': 'Airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False
}
SPARK_TEST_STEPS = [
{
'Name': 'calculate_pi',
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': [
'/usr/lib/spark/bin/run-example',
'SparkPi',
'10'
]
}
}
]
JOB_FLOW_OVERRIDES = {
'Name': 'PiCalc',
'Steps': SPARK_TEST_STEPS
}
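# Editor's note, not part of the original example: EmrCreateJobFlowOperator
# merges JOB_FLOW_OVERRIDES on top of the job-flow config stored in the
# `emr_default` connection's extras, so cluster details such as instance
# counts and the EMR release label are assumed to come from that connection.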
with DAG(
dag_id='emr_job_flow_automatic_steps_dag',
default_args=DEFAULT_ARGS,
dagrun_timeout=timedelta(hours=2),
schedule_interval='0 3 * * *'
) as dag:
job_flow_creator = EmrCreateJobFlowOperator(
task_id='create_job_flow',
job_flow_overrides=JOB_FLOW_OVERRIDES,
aws_conn_id='aws_default',
emr_conn_id='emr_default'
)
job_sensor = EmrJobFlowSensor(
task_id='check_job_flow',
job_flow_id="{{ task_instance.xcom_pull(task_ids='create_job_flow', key='return_value') }}",
aws_conn_id='aws_default'
)
job_flow_creator >> job_sensor
| apache-2.0 | -6,546,665,006,355,624,000 | 29.294872 | 100 | 0.658485 | false |
elvandy/nltools | nltools/datasets.py | 1 | 4739 | '''
NeuroLearn datasets
===================
functions to help download datasets
'''
## Notes:
# Need to figure out how to speed up loading and resampling of data
__all__ = ['download_nifti',
'get_collection_image_metadata',
'download_collection',
'fetch_emotion_ratings',
'fetch_pain']
__author__ = ["Luke Chang"]
__license__ = "MIT"
import os
import pandas as pd
from nltools.data import Brain_Data
from nilearn.datasets.utils import _get_dataset_dir, _fetch_file
from pynv import Client
# Optional dependencies
try:
import requests
except ImportError:
pass
def download_nifti(url, data_dir=None):
    '''Download an image to a NIfTI file.'''
local_filename = url.split('/')[-1]
if data_dir is not None:
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
local_filename = os.path.join(data_dir,local_filename)
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return local_filename
def get_collection_image_metadata(collection=None, data_dir=None,
limit=10):
''' Get image metadata associated with collection
Args:
collection: (int) collection id
data_dir: (str) data directory
        limit: (int) number of images to fetch per request (pagination step)
Returns:
metadata: (pd.DataFrame) Dataframe with full image metadata from
collection
'''
if os.path.isfile(os.path.join(data_dir, 'metadata.csv')):
dat = pd.read_csv(os.path.join(data_dir, 'metadata.csv'))
else:
offset = 0
api = Client()
i = api.get_collection_images(collection_id=collection, limit=limit,offset=offset)
dat = pd.DataFrame(columns=i['results'][0].keys())
while int(offset) < int(i['count']):
for x in i['results']:
dat = dat.append(x, ignore_index=True)
offset = offset + limit
i = api.get_collection_images(collection_id=collection, limit=limit, offset=offset)
dat.to_csv(os.path.join(data_dir,'metadata.csv'), index=False)
return dat
def download_collection(collection=None, data_dir=None, overwrite=False,
resume=True, verbose=1):
''' Download images and metadata from Neurovault collection
Args:
collection: (int) collection id
data_dir: (str) data directory
Returns:
metadata: (pd.DataFrame) Dataframe with full image metadata from
collection
files: (list) list of files of downloaded collection
'''
if data_dir is None:
data_dir = _get_dataset_dir(str(collection), data_dir=data_dir,
verbose=verbose)
# Get collection Metadata
metadata = get_collection_image_metadata(collection=collection,
data_dir=data_dir)
# Get images
files = []
for f in metadata['file']:
files.append(_fetch_file(f, data_dir, resume=resume, verbose=verbose,
overwrite=overwrite))
return (metadata,files)
def fetch_pain(data_dir=None, resume=True, verbose=1):
    '''Download and load the pain dataset from neurovault (collection 504).
    Args:
        data_dir: (string, optional) Path of the data directory. Used to
            force data storage in a specified location. Default: None
        resume: (bool, optional) If True, resume a partially downloaded
            collection rather than starting over. Default: True
        verbose: (int, optional) Verbosity level passed to the download
            helpers. Default: 1
    Returns:
        Brain_Data: pain images with the collection metadata attached as X
    '''
collection = 504
dataset_name = 'chang2015_pain'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
metadata, files = download_collection(collection=collection,
data_dir=data_dir, resume=resume,
verbose=verbose)
return Brain_Data(data=files, X=metadata)
def fetch_emotion_ratings(data_dir=None, resume=True, verbose=1):
    '''Download and load the emotion rating dataset from neurovault
    (collection 1964).
    Args:
        data_dir: (string, optional) Path of the data directory. Used to
            force data storage in a specified location. Default: None
        resume: (bool, optional) If True, resume a partially downloaded
            collection rather than starting over. Default: True
        verbose: (int, optional) Verbosity level passed to the download
            helpers. Default: 1
    Returns:
        Brain_Data: emotion rating images with collection metadata as X
    '''
collection = 1964
dataset_name = 'chang2015_emotion_ratings'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
metadata, files = download_collection(collection=collection,
data_dir=data_dir, resume=resume,
verbose=verbose)
return Brain_Data(data=files, X=metadata)
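# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Assumes network access to Neurovault; the cache directory below is hypothetical.
def _example_fetch_pain(cache_dir='/tmp/nltools_data'):
    """Fetch the pain collection; the collection metadata is cached as metadata.csv."""
    dat = fetch_pain(data_dir=cache_dir)
    # The metadata DataFrame is attached to the returned Brain_Data object as X.
    print(dat.X.head())
    return dat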
| mit | 7,979,483,591,160,874,000 | 28.993671 | 86 | 0.703313 | false |
qilicun/python | python2/PyMOTW-1.132/PyMOTW/calendar/calendar_textcalendar.py | 1 | 1210 | #!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Sample of TextCalendar output.
"""
__module_id__ = "$Id$"
#end_pymotw_header
import calendar
c = calendar.TextCalendar(calendar.SUNDAY)
c.prmonth(2007, 7)
| gpl-3.0 | -6,277,574,415,358,937,000 | 32.611111 | 70 | 0.760331 | false |
Arubacloud/pyArubaCloud | examples/create_pro_vm.py | 1 | 1437 | import argparse
import logging
from pprint import pprint
from ArubaCloud.PyArubaAPI import CloudInterface
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--datacenter', help='Specify datacenter to login.', action='store', type=int, dest='dc')
parser.add_argument('-u', '--username', help='Specify username.', action='store', dest='username')
parser.add_argument('-w', '--password', help='Specify password.', action='store', dest='password')
parser.add_argument('-t', '--template', help='Specify template.', action='store', dest='template')
parser.add_argument('-n', '--name', help='Specify VM name', action='store', dest='vmname')
parser.add_argument('--vmpassword', help='Specify VM admin password.', action='store', dest='vmpassword',
default='ArubaCloud2015')
p = parser.parse_args()
i = CloudInterface(dc=p.dc, debug_level=logging.DEBUG)
i.login(username=p.username, password=p.password, load=False)
ip = i.purchase_ip(debug=True)
from ArubaCloud.objects import ProVmCreator
c = ProVmCreator(name=p.vmname, admin_password=p.vmpassword, template_id=p.template, auth_obj=i.auth)
c.set_cpu_qty(2)
c.set_ram_qty(6)
c.add_public_ip(public_ip_address_resource_id=ip.resid)
c.add_virtual_disk(40)
c.add_virtual_disk(40)
pprint(c.get_json())
print(c.commit(url=i.wcf_baseurl, debug=True))
| apache-2.0 | -7,066,435,289,594,719,000 | 42.545455 | 119 | 0.675017 | false |
zuowang/Paddle | python/paddle/trainer/PyDataProvider2.py | 1 | 13558 | # Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle
import logging
import collections
import functools
import itertools
logging.basicConfig(
format="[%(levelname)s %(asctime)s %(filename)s:%(lineno)s]"
" %(message)s")
class SequenceType(object):
NO_SEQUENCE = 0
SEQUENCE = 1
SUB_SEQUENCE = 2
# TODO(yuyang18): Add string data type here.
class DataType(object):
Dense = 0
SparseNonValue = 1
SparseValue = 2
Index = 3
class CacheType(object):
NO_CACHE = 0 # No cache at all
# First pass, read data from python. And store them in memory. Read from
# memory during rest passes.
CACHE_PASS_IN_MEM = 1
class InputType(object):
__slots__ = ['dim', 'seq_type', 'type']
def __init__(self, dim, seq_type, tp):
self.dim = dim
self.seq_type = seq_type
self.type = tp
def dense_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
return InputType(dim, seq_type, DataType.Dense)
def sparse_non_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
return InputType(dim, seq_type, DataType.SparseNonValue)
def sparse_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
return InputType(dim, seq_type, DataType.SparseValue)
def index_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
return InputType(dim, seq_type, DataType.Index)
dense_vector = dense_slot
sparse_binary_vector = sparse_non_value_slot
sparse_vector = sparse_value_slot
integer_value = index_slot
def dense_vector_sequence(dim):
return dense_vector(dim, seq_type=SequenceType.SEQUENCE)
def dense_vector_sub_sequence(dim):
return dense_vector(dim, seq_type=SequenceType.SUB_SEQUENCE)
def sparse_binary_vector_sequence(dim):
return sparse_binary_vector(dim, seq_type=SequenceType.SEQUENCE)
def sparse_binary_vector_sub_sequence(dim):
return sparse_binary_vector(dim, seq_type=SequenceType.SUB_SEQUENCE)
def sparse_vector_sequence(dim):
return sparse_vector(dim, seq_type=SequenceType.SEQUENCE)
def sparse_vector_sub_sequence(dim):
return sparse_vector(dim, seq_type=SequenceType.SUB_SEQUENCE)
def integer_value_sequence(dim):
return integer_value(dim, seq_type=SequenceType.SEQUENCE)
def integer_value_sub_sequence(dim):
return integer_value(dim, seq_type=SequenceType.SUB_SEQUENCE)
def integer_sequence(dim):
return index_slot(dim, seq_type=SequenceType.SEQUENCE)
class SingleSlotWrapper(object):
def __init__(self, generator):
self.generator = generator
def __call__(self, obj, filename):
for item in self.generator(obj, filename):
if isinstance(item, dict):
yield item
else:
yield [item]
class InputOrderWrapper(object):
def __init__(self, generator, input_order):
self.generator = generator
self.input_order = input_order
def __call__(self, obj, filename):
for item in self.generator(obj, filename):
if isinstance(item, dict):
yield [item.get(input_name, None) for input_name in
self.input_order]
else:
yield item
class CheckWrapper(object):
def __init__(self, generator, input_types, check_fail_continue, logger):
self.generator = generator
self.input_types = input_types
self.check_fail_continue = check_fail_continue
self.logger = logger
def __call__(self, obj, filename):
for items in self.generator(obj, filename):
try:
assert len(items) == len(self.input_types)
assert len(filter(lambda x: x is None, items)) == 0
for item, input_type in itertools.izip(items, self.input_types):
callback = functools.partial(CheckWrapper.loop_callback,
input_type)
for _ in xrange(input_type.seq_type):
callback = functools.partial(CheckWrapper.loop_check,
callback)
callback(item)
yield items
except AssertionError as e:
self.logger.warning(
"Item (%s) is not fit the input type with error %s"
% (repr(item), repr(e)))
if self.check_fail_continue:
continue
else:
raise
@staticmethod
def loop_callback(input_type, each):
assert isinstance(input_type, InputType)
if input_type.type == DataType.Dense:
assert isinstance(each, collections.Sequence)
for d in each:
assert isinstance(d, float)
            assert len(each) == input_type.dim
elif input_type.type == DataType.Index:
assert isinstance(each, int)
assert each < input_type.dim
elif input_type.type == DataType.SparseNonValue \
or input_type.type == DataType.SparseValue:
assert isinstance(each, collections.Sequence)
sparse_id = set()
for k in each:
if input_type.type == DataType.SparseValue:
k, v = k
assert isinstance(v, float)
assert isinstance(k, int)
assert k < input_type.dim
sparse_id.add(k)
assert len(sparse_id) == len(each)
else:
raise RuntimeError("Not support input type")
@staticmethod
def loop_check(callback, item):
for each in item:
callback(each)
def provider(input_types=None, should_shuffle=None, pool_size=-1,
min_pool_size=-1,
can_over_batch_size=True,
calc_batch_size=None,
cache=CacheType.NO_CACHE,
check=False, check_fail_continue=False,
init_hook=None, **kwargs):
"""
    Provider decorator. Use it to turn a function into a PyDataProvider2 object.
    In the decorated function, the user only needs to yield one sample at a time
    read from the given train/test file.
The basic usage is:
.. code-block:: python
@provider(some data provider config here...)
def process(settings, file_name):
while not at end of file_name:
sample = readOneSampleFromFile(file_name)
yield sample.
The configuration of data provider should be setup by\:
:param input_types: Specify the input types, can also be set in init_hook.
                        It can be a list of InputType objects. For example,
                        input_types=[dense_vector(9), integer_value(2)]. Alternatively,
                        the user can set a dict of InputType objects whose keys are
                        data_layer names. For example, input_types=\
                        {'img': img_features, 'label': label}. When using a dict of
                        InputType, the user can yield a dict of feature values whose
                        keys are also the data_layer names.
:type input_types: list|tuple|dict
    :param should_shuffle: True if the data should be shuffled. Pass None to
                           shuffle during training and not during testing.
:type should_shuffle: bool
    :param pool_size: Max number of samples in the data pool.
:type pool_size: int
    :param min_pool_size: Set the minimum number of samples kept in the data
                          pool. PaddlePaddle picks samples from the pool at
                          random, so min_pool_size affects how well the data
                          is randomized.
:type min_pool_size: int
    :param can_over_batch_size: True if PaddlePaddle may return a mini-batch
                                larger than the batch size in settings. Useful
                                when customizing each sample's batch size. It
                                is dangerous to set this to False while also
                                using calc_batch_size. Default is True.
:type can_over_batch_size: bool
    :param calc_batch_size: a method to calculate each sample's batch size.
                            By default each sample's batch size is 1, but you
                            can customize it per sample.
:type calc_batch_size: callable
:param cache: Cache strategy of Data Provider. Default is CacheType.NO_CACHE
:type cache: int
    :param init_hook: Initialize hook. Useful when the data provider needs to
                      load some external data, such as a dictionary. The parameters are
(settings, file_list, \*\*kwargs).
- settings. It is the global settings object. User can set
settings.input_types here.
                      - file_list. All file names passed to the data provider.
- is_train. Is this data provider used for training or not.
- kwargs. Other keyword arguments passed from
trainer_config's args parameter.
:type init_hook: callable
    :param check: Check that the yielded data format matches input_types.
                  Enabling this makes the data provider slower, but it is very
                  useful for debugging. Default is disabled.
:type check: bool
    :param check_fail_continue: Whether to continue training when a check
                                fails. If True, wrongly formatted samples are
                                simply dropped. Has no effect when check is
                                set to False.
:type check_fail_continue: bool
"""
def __wrapper__(generator):
class DataProvider(object):
def __init__(self, file_list, **kwargs):
self.logger = logging.getLogger("")
self.logger.setLevel(logging.INFO)
self.input_types = None
if 'slots' in kwargs:
self.logger.warning('setting slots value is deprecated, '
'please use input_types instead.')
self.slots = kwargs['slots']
self.slots = input_types
self.should_shuffle = should_shuffle
true_table = [1, 't', 'true', 'on']
false_table = [0, 'f', 'false', 'off']
if not isinstance(self.should_shuffle, bool) and \
self.should_shuffle is not None:
if isinstance(self.should_shuffle, basestring):
self.should_shuffle = self.should_shuffle.lower()
if self.should_shuffle in true_table:
self.should_shuffle = True
elif self.should_shuffle in false_table:
self.should_shuffle = False
else:
self.logger.warning(
"Could not recognize should_shuffle (%s), "
"just use default value of should_shuffle."
" Please set should_shuffle to bool value or "
"something in %s" % (
repr(self.should_shuffle),
repr(true_table + false_table)))
self.should_shuffle = None
self.pool_size = pool_size
self.can_over_batch_size = can_over_batch_size
self.calc_batch_size = calc_batch_size
self.file_list = file_list
self.generator = generator
self.cache = cache
self.min_pool_size = min_pool_size
self.input_order = kwargs['input_order']
self.check = check
if init_hook is not None:
init_hook(self, file_list=file_list, **kwargs)
if self.input_types is not None:
self.slots = self.input_types
assert self.slots is not None
assert self.generator is not None
use_dynamic_order = False
if isinstance(self.slots, dict): # reorder input_types
self.slots = [self.slots[ipt] for ipt in self.input_order]
use_dynamic_order = True
if len(self.slots) == 1:
self.generator = SingleSlotWrapper(self.generator)
if use_dynamic_order:
self.generator = InputOrderWrapper(self.generator,
self.input_order)
if self.check:
self.generator = CheckWrapper(self.generator,
self.slots,
check_fail_continue,
self.logger)
return DataProvider
return __wrapper__
def deserialize_args(args):
"""
Internal use only.
:param args:
:return:
"""
return cPickle.loads(args)
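# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal @provider definition. The file format (one "<label>\t<word ids>" line per
# sample) and the vocabulary size of 10000 are hypothetical.
@provider(input_types=[integer_value_sequence(10000), integer_value(2)])
def example_process(settings, file_name):
    with open(file_name) as f:
        for line in f:
            label, ids = line.rstrip('\n').split('\t')
            word_ids = [int(w) for w in ids.split()]
            yield word_ids, int(label)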
| apache-2.0 | -1,903,039,986,429,761,800 | 35.544474 | 83 | 0.567119 | false |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/SCOP/__init__.py | 1 | 31882 | # Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2004/2005 James Casbon. All rights Reserved.
# Modifications Copyright 2010 Jeffrey Finkelstein. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Changes made by James Casbon:
# - New Astral class
# - SQL functionality for both Scop and Astral classes
# - All sunids are int not strings
#
# Code written by Jeffrey Chang to access SCOP over the internet, which
# was previously in Bio.WWW.SCOP, has now been merged into this module.
""" SCOP: Structural Classification of Proteins.
The SCOP database aims to provide a manually constructed classification of
all known protein structures into a hierarchy, the main levels of which
are family, superfamily and fold.
* "SCOP":http://scop.mrc-lmb.cam.ac.uk/scop/
* "Introduction":http://scop.mrc-lmb.cam.ac.uk/scop/intro.html
* "SCOP parsable files":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
The Scop object in this module represents the entire SCOP classification. It
can be built from the three SCOP parsable files, modified if so desired, and
converted back to the same file formats. A single SCOP domain (represented
by the Domain class) can be obtained from Scop using the domain's SCOP
identifier (sid).
nodeCodeDict -- A mapping between known 2 letter node codes and a longer
description. The known node types are 'cl' (class), 'cf'
(fold), 'sf' (superfamily), 'fa' (family), 'dm' (domain),
'sp' (species), 'px' (domain). Additional node types may
be added in the future.
This module also provides code to access SCOP over the WWW.
Functions:
search -- Access the main CGI script.
_open -- Internally used function.
"""
from __future__ import print_function
import os
import re
from . import Des
from . import Cla
from . import Hie
from . import Residues
from Bio import SeqIO
from Bio.Seq import Seq
__docformat__ = "restructuredtext en"
nodeCodeDict = {'cl': 'class', 'cf': 'fold', 'sf': 'superfamily',
'fa': 'family', 'dm': 'protein', 'sp': 'species', 'px': 'domain'}
_nodetype_to_code = {'class': 'cl', 'fold': 'cf', 'superfamily': 'sf',
'family': 'fa', 'protein': 'dm', 'species': 'sp', 'domain': 'px'}
nodeCodeOrder = ['ro', 'cl', 'cf', 'sf', 'fa', 'dm', 'sp', 'px']
astralBibIds = [10, 20, 25, 30, 35, 40, 50, 70, 90, 95, 100]
astralEvs = [10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 1e-4, 1e-5, 1e-10, 1e-15,
1e-20, 1e-25, 1e-50]
astralEv_to_file = {10: 'e+1', 5: 'e+0,7', 1: 'e+0', 0.5: 'e-0,3', 0.1: 'e-1',
0.05: 'e-1,3', 0.01: 'e-2', 0.005: 'e-2,3', 0.001: 'e-3',
1e-4: 'e-4', 1e-5: 'e-5', 1e-10: 'e-10', 1e-15: 'e-15',
1e-20: 'e-20', 1e-25: 'e-25', 1e-50: 'e-50'}
astralEv_to_sql = {10: 'e1', 5: 'e0_7', 1: 'e0', 0.5: 'e_0_3', 0.1: 'e_1',
0.05: 'e_1_3', 0.01: 'e_2', 0.005: 'e_2_3', 0.001: 'e_3',
1e-4: 'e_4', 1e-5: 'e_5', 1e-10: 'e_10', 1e-15: 'e_15',
1e-20: 'e_20', 1e-25: 'e_25', 1e-50: 'e_50'}
try:
# See if the cmp function exists (will on Python 2)
_cmp = cmp
except NameError:
def _cmp(a, b):
"""Implementation of cmp(x,y) for Python 3 (PRIVATE).
Based on Python 3 docs which say if you really need the cmp()
functionality, you could use the expression (a > b) - (a < b)
as the equivalent for cmp(a, b)
"""
return (a > b) - (a < b)
def cmp_sccs(sccs1, sccs2):
"""Order SCOP concise classification strings (sccs).
a.4.5.1 < a.4.5.11 < b.1.1.1
A sccs (e.g. a.4.5.11) compactly represents a domain's classification.
The letter represents the class, and the numbers are the fold,
superfamily, and family, respectively.
"""
s1 = sccs1.split(".")
s2 = sccs2.split(".")
if s1[0] != s2[0]:
return _cmp(s1[0], s2[0])
s1 = [int(x) for x in s1[1:]]
s2 = [int(x) for x in s2[1:]]
return _cmp(s1, s2)
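# --- Illustrative check (editor's addition, not part of the original module) ---
def _example_sccs_ordering():
    """Reproduces the ordering quoted in the cmp_sccs docstring."""
    assert cmp_sccs("a.4.5.1", "a.4.5.11") < 0
    assert cmp_sccs("a.4.5.11", "b.1.1.1") < 0
    return True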
_domain_re = re.compile(r">?([\w_\.]*)\s+([\w\.]*)\s+\(([^)]*)\) (.*)")
def parse_domain(str):
"""Convert an ASTRAL header string into a Scop domain.
An ASTRAL (http://astral.stanford.edu/) header contains a concise
description of a SCOP domain. A very similar format is used when a
Domain object is converted into a string. The Domain returned by this
method contains most of the SCOP information, but it will not be located
within the SCOP hierarchy (i.e. The parent node will be None). The
description is composed of the SCOP protein and species descriptions.
A typical ASTRAL header looks like --
>d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}
"""
m = _domain_re.match(str)
if (not m):
raise ValueError("Domain: " + str)
dom = Domain()
dom.sid = m.group(1)
dom.sccs = m.group(2)
dom.residues = Residues.Residues(m.group(3))
if not dom.residues.pdbid:
dom.residues.pdbid = dom.sid[1:5]
dom.description = m.group(4).strip()
return dom
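# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
def _example_parse_domain():
    """Parses the sample ASTRAL header quoted in the parse_domain docstring."""
    header = ">d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}"
    dom = parse_domain(header)
    # The pdbid is filled in from the sid because "(1-70)" carries no PDB code.
    print(dom.sid, dom.sccs, dom.residues.pdbid)
    return dom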
def _open_scop_file(scop_dir_path, version, filetype):
filename = "dir.%s.scop.txt_%s" % (filetype, version)
handle = open(os.path.join(scop_dir_path, filename))
return handle
class Scop(object):
"""The entire SCOP hierarchy.
root -- The root node of the hierarchy
"""
def __init__(self, cla_handle=None, des_handle=None, hie_handle=None,
dir_path=None, db_handle=None, version=None):
"""Build the SCOP hierarchy from the SCOP parsable files, or a sql backend.
If no file handles are given, then a Scop object with a single
empty root node is returned.
If a directory and version are given (with dir_path=.., version=...) or
file handles for each file, the whole scop tree will be built in memory.
If a MySQLdb database handle is given, the tree will be built as needed,
        minimising construction times. To build the SQL database, use the
        write_xxx_sql methods to create the tables.
"""
self._sidDict = {}
self._sunidDict = {}
if all(h is None for h in [cla_handle, des_handle, hie_handle, dir_path, db_handle]):
return
if dir_path is None and db_handle is None:
if cla_handle is None or des_handle is None or hie_handle is None:
raise RuntimeError("Need CLA, DES and HIE files to build SCOP")
sunidDict = {}
self.db_handle = db_handle
try:
if db_handle:
# do nothing if we have a db handle, we'll do it all on the fly
pass
else:
# open SCOP parseable files
if dir_path:
if not version:
raise RuntimeError("Need SCOP version to find parsable files in directory")
if cla_handle or des_handle or hie_handle:
raise RuntimeError("Cannot specify SCOP directory and specific files")
cla_handle = _open_scop_file(dir_path, version, 'cla')
des_handle = _open_scop_file(dir_path, version, 'des')
hie_handle = _open_scop_file(dir_path, version, 'hie')
root = Node()
domains = []
root.sunid = 0
root.type = 'ro'
sunidDict[root.sunid] = root
self.root = root
root.description = 'SCOP Root'
# Build the rest of the nodes using the DES file
records = Des.parse(des_handle)
for record in records:
if record.nodetype == 'px':
n = Domain()
n.sid = record.name
domains.append(n)
else:
n = Node()
n.sunid = record.sunid
n.type = record.nodetype
n.sccs = record.sccs
n.description = record.description
sunidDict[n.sunid] = n
# Glue all of the Nodes together using the HIE file
records = Hie.parse(hie_handle)
for record in records:
if record.sunid not in sunidDict:
print(record.sunid)
n = sunidDict[record.sunid]
if record.parent != '': # Not root node
if record.parent not in sunidDict:
raise ValueError("Incomplete data?")
n.parent = sunidDict[record.parent]
for c in record.children:
if c not in sunidDict:
raise ValueError("Incomplete data?")
n.children.append(sunidDict[c])
# Fill in the gaps with information from the CLA file
sidDict = {}
records = Cla.parse(cla_handle)
for record in records:
n = sunidDict[record.sunid]
assert n.sccs == record.sccs
assert n.sid == record.sid
n.residues = record.residues
sidDict[n.sid] = n
# Clean up
self._sunidDict = sunidDict
self._sidDict = sidDict
self._domains = tuple(domains)
finally:
if dir_path:
# If we opened the files, we close the files
if cla_handle:
cla_handle.close()
if des_handle:
des_handle.close()
if hie_handle:
hie_handle.close()
def getRoot(self):
return self.getNodeBySunid(0)
def getDomainBySid(self, sid):
"""Return a domain from its sid"""
if sid in self._sidDict:
return self._sidDict[sid]
if self.db_handle:
self.getDomainFromSQL(sid=sid)
if sid in self._sidDict:
return self._sidDict[sid]
else:
return None
def getNodeBySunid(self, sunid):
"""Return a node from its sunid"""
if sunid in self._sunidDict:
return self._sunidDict[sunid]
if self.db_handle:
self.getDomainFromSQL(sunid=sunid)
if sunid in self._sunidDict:
return self._sunidDict[sunid]
else:
return None
def getDomains(self):
"""Returns an ordered tuple of all SCOP Domains"""
if self.db_handle:
return self.getRoot().getDescendents('px')
else:
return self._domains
def write_hie(self, handle):
"""Build an HIE SCOP parsable file from this object"""
# We order nodes to ease comparison with original file
for n in sorted(self._sunidDict.values(), key=lambda n: n.sunid):
handle.write(str(n.toHieRecord()))
def write_des(self, handle):
"""Build a DES SCOP parsable file from this object"""
        # Original SCOP file is not ordered?
for n in sorted(self._sunidDict.values(), key=lambda n: n.sunid):
if n != self.root:
handle.write(str(n.toDesRecord()))
def write_cla(self, handle):
"""Build a CLA SCOP parsable file from this object"""
# We order nodes to ease comparison with original file
for n in sorted(self._sidDict.values(), key=lambda n: n.sunid):
handle.write(str(n.toClaRecord()))
def getDomainFromSQL(self, sunid=None, sid=None):
"""Load a node from the SQL backend using sunid or sid"""
if sunid is None and sid is None:
return None
cur = self.db_handle.cursor()
if sid:
cur.execute("SELECT sunid FROM cla WHERE sid=%s", sid)
res = cur.fetchone()
if res is None:
return None
sunid = res[0]
cur.execute("SELECT * FROM des WHERE sunid=%s", sunid)
data = cur.fetchone()
if data is not None:
n = None
# determine if Node or Domain
if data[1] != "px":
n = Node(scop=self)
cur.execute("SELECT child FROM hie WHERE parent=%s", sunid)
children = []
for c in cur.fetchall():
children.append(c[0])
n.children = children
else:
n = Domain(scop=self)
cur.execute("select sid, residues, pdbid from cla where sunid=%s",
sunid)
[n.sid, n.residues, pdbid] = cur.fetchone()
n.residues = Residues.Residues(n.residues)
n.residues.pdbid = pdbid
self._sidDict[n.sid] = n
[n.sunid, n.type, n.sccs, n.description] = data
if data[1] != 'ro':
cur.execute("SELECT parent FROM hie WHERE child=%s", sunid)
n.parent = cur.fetchone()[0]
n.sunid = int(n.sunid)
self._sunidDict[n.sunid] = n
def getAscendentFromSQL(self, node, type):
"""Get ascendents using SQL backend"""
if nodeCodeOrder.index(type) >= nodeCodeOrder.index(node.type):
return None
cur = self.db_handle.cursor()
cur.execute("SELECT " + type + " from cla WHERE " + node.type + "=%s", (node.sunid))
result = cur.fetchone()
if result is not None:
return self.getNodeBySunid(result[0])
else:
return None
def getDescendentsFromSQL(self, node, type):
"""Get descendents of a node using the database backend. This avoids
repeated iteration of SQL calls and is therefore much quicker than
repeatedly calling node.getChildren().
"""
if nodeCodeOrder.index(type) <= nodeCodeOrder.index(node.type):
return []
des_list = []
# SQL cla table knows nothing about 'ro'
if node.type == 'ro':
for c in node.getChildren():
for d in self.getDescendentsFromSQL(c, type):
des_list.append(d)
return des_list
cur = self.db_handle.cursor()
if type != 'px':
cur.execute("SELECT DISTINCT des.sunid,des.type,des.sccs,description FROM \
cla,des WHERE cla." + node.type + "=%s AND cla." + type + "=des.sunid", (node.sunid))
data = cur.fetchall()
for d in data:
if int(d[0]) not in self._sunidDict:
n = Node(scop=self)
[n.sunid, n.type, n.sccs, n.description] = d
n.sunid = int(n.sunid)
self._sunidDict[n.sunid] = n
cur.execute("SELECT parent FROM hie WHERE child=%s", n.sunid)
n.parent = cur.fetchone()[0]
cur.execute("SELECT child FROM hie WHERE parent=%s", n.sunid)
children = []
for c in cur.fetchall():
children.append(c[0])
n.children = children
des_list.append(self._sunidDict[int(d[0])])
else:
cur.execute("SELECT cla.sunid,sid,pdbid,residues,cla.sccs,type,description,sp\
FROM cla,des where cla.sunid=des.sunid and cla." + node.type + "=%s",
node.sunid)
data = cur.fetchall()
for d in data:
if int(d[0]) not in self._sunidDict:
n = Domain(scop=self)
# [n.sunid, n.sid, n.pdbid, n.residues, n.sccs, n.type,
# n.description,n.parent] = data
[n.sunid, n.sid, pdbid, n.residues, n.sccs, n.type, n.description,
n.parent] = d[0:8]
n.residues = Residues.Residues(n.residues)
n.residues.pdbid = pdbid
n.sunid = int(n.sunid)
self._sunidDict[n.sunid] = n
self._sidDict[n.sid] = n
des_list.append(self._sunidDict[int(d[0])])
return des_list
def write_hie_sql(self, handle):
"""Write HIE data to SQL database"""
cur = handle.cursor()
cur.execute("DROP TABLE IF EXISTS hie")
cur.execute("CREATE TABLE hie (parent INT, child INT, PRIMARY KEY (child),\
INDEX (parent) )")
for p in self._sunidDict.values():
for c in p.children:
cur.execute("INSERT INTO hie VALUES (%s,%s)" % (p.sunid, c.sunid))
def write_cla_sql(self, handle):
"""Write CLA data to SQL database"""
cur = handle.cursor()
cur.execute("DROP TABLE IF EXISTS cla")
cur.execute("CREATE TABLE cla (sunid INT, sid CHAR(8), pdbid CHAR(4),\
residues VARCHAR(50), sccs CHAR(10), cl INT, cf INT, sf INT, fa INT,\
dm INT, sp INT, px INT, PRIMARY KEY (sunid), INDEX (SID) )")
for n in self._sidDict.values():
c = n.toClaRecord()
cur.execute("INSERT INTO cla VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
(n.sunid, n.sid, c.residues.pdbid, c.residues, n.sccs,
n.getAscendent('cl').sunid, n.getAscendent('cf').sunid,
n.getAscendent('sf').sunid, n.getAscendent('fa').sunid,
n.getAscendent('dm').sunid, n.getAscendent('sp').sunid,
n.sunid))
def write_des_sql(self, handle):
"""Write DES data to SQL database"""
cur = handle.cursor()
cur.execute("DROP TABLE IF EXISTS des")
cur.execute("CREATE TABLE des (sunid INT, type CHAR(2), sccs CHAR(10),\
description VARCHAR(255),\
PRIMARY KEY (sunid) )")
for n in self._sunidDict.values():
cur.execute("INSERT INTO des VALUES (%s,%s,%s,%s)",
(n.sunid, n.type, n.sccs, n.description))
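# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Builds the whole tree from locally downloaded parsable files; the directory name,
# release version and sid below are hypothetical.
def _example_walk_scop(dir_path="scop-parse-files", version="1.75"):
    scop = Scop(dir_path=dir_path, version=version)
    dom = scop.getDomainBySid("d1tpt_1")
    if dom is not None:
        fold = dom.getAscendent("fold")
        if fold is not None:
            print(dom.sccs, "belongs to fold:", fold.description)
    return scop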
class Node(object):
""" A node in the Scop hierarchy
sunid -- SCOP unique identifiers. e.g. '14986'
parent -- The parent node
children -- A list of child nodes
sccs -- SCOP concise classification string. e.g. 'a.1.1.2'
type -- A 2 letter node type code. e.g. 'px' for domains
description --
"""
def __init__(self, scop=None):
"""Create a Node in the scop hierarchy. If a Scop instance is provided to the
constructor, this will be used to lookup related references using the SQL
methods. If no instance is provided, it is assumed the whole tree exists
and is connected."""
self.sunid = ''
self.parent = None
self.children = []
self.sccs = ''
self.type = ''
self.description = ''
self.scop = scop
def __str__(self):
s = []
s.append(str(self.sunid))
s.append(self.sccs)
s.append(self.type)
s.append(self.description)
return " ".join(s)
def toHieRecord(self):
"""Return an Hie.Record"""
rec = Hie.Record()
rec.sunid = str(self.sunid)
if self.getParent(): # Not root node
rec.parent = str(self.getParent().sunid)
else:
rec.parent = '-'
for c in self.getChildren():
rec.children.append(str(c.sunid))
return rec
def toDesRecord(self):
"""Return a Des.Record"""
rec = Des.Record()
rec.sunid = str(self.sunid)
rec.nodetype = self.type
rec.sccs = self.sccs
rec.description = self.description
return rec
def getChildren(self):
"""Return a list of children of this Node"""
if self.scop is None:
return self.children
else:
return [self.scop.getNodeBySunid(x) for x in self.children]
def getParent(self):
"""Return the parent of this Node"""
if self.scop is None:
return self.parent
else:
return self.scop.getNodeBySunid(self.parent)
def getDescendents(self, node_type):
""" Return a list of all decendent nodes of the given type. Node type can a
two letter code or longer description. e.g. 'fa' or 'family'
"""
if node_type in _nodetype_to_code:
node_type = _nodetype_to_code[node_type]
nodes = [self]
if self.scop:
return self.scop.getDescendentsFromSQL(self, node_type)
while nodes[0].type != node_type:
if nodes[0].type == 'px':
return [] # Fell of the bottom of the hierarchy
child_list = []
for n in nodes:
for child in n.getChildren():
child_list.append(child)
nodes = child_list
return nodes
def getAscendent(self, node_type):
""" Return the ancenstor node of the given type, or None.Node type can a
two letter code or longer description. e.g. 'fa' or 'family'"""
if node_type in _nodetype_to_code:
node_type = _nodetype_to_code[node_type]
if self.scop:
return self.scop.getAscendentFromSQL(self, node_type)
else:
n = self
if n.type == node_type:
return None
while n.type != node_type:
if n.type == 'ro':
return None # Fell of the top of the hierarchy
n = n.getParent()
return n
class Domain(Node):
""" A SCOP domain. A leaf node in the Scop hierarchy.
- sid -- The SCOP domain identifier. e.g. ``"d5hbib_"``
- residues -- A Residue object. It defines the collection
of PDB atoms that make up this domain.
"""
def __init__(self, scop=None):
Node.__init__(self, scop=scop)
self.sid = ''
self.residues = None
def __str__(self):
s = []
s.append(self.sid)
s.append(self.sccs)
s.append("(" + str(self.residues) + ")")
if not self.getParent():
s.append(self.description)
else:
sp = self.getParent()
dm = sp.getParent()
s.append(dm.description)
s.append("{" + sp.description + "}")
return " ".join(s)
def toDesRecord(self):
"""Return a Des.Record"""
rec = Node.toDesRecord(self)
rec.name = self.sid
return rec
def toClaRecord(self):
"""Return a Cla.Record"""
rec = Cla.Record()
rec.sid = self.sid
rec.residues = self.residues
rec.sccs = self.sccs
rec.sunid = self.sunid
n = self
while n.sunid != 0: # Not root node
rec.hierarchy[n.type] = str(n.sunid)
n = n.getParent()
# Order does not matter in the hierarchy field. For more info, see
# http://scop.mrc-lmb.cam.ac.uk/scop/release-notes.html
# rec.hierarchy.reverse()
return rec
class Astral(object):
"""Abstraction of the ASTRAL database, which has sequences for all the SCOP domains,
as well as clusterings by percent id or evalue.
"""
def __init__(self, dir_path=None, version=None, scop=None,
astral_file=None, db_handle=None):
"""
Initialise the astral database.
You must provide either a directory of SCOP files:
- dir_path - string, the path to location of the scopseq-x.xx directory
(not the directory itself), and
        - version - a version number.
or, a FASTA file:
- astral_file - string, a path to a fasta file (which will be loaded in memory)
or, a MYSQL database:
- db_handle - a database handle for a MYSQL database containing a table
'astral' with the astral data in it. This can be created
using writeToSQL.
"""
if astral_file is None and dir_path is None and db_handle is None:
raise RuntimeError("Need either file handle, or (dir_path + "
+ "version) or database handle to construct Astral")
if not scop:
raise RuntimeError("Must provide a Scop instance to construct")
self.scop = scop
self.db_handle = db_handle
if not astral_file and not db_handle:
if dir_path is None or version is None:
raise RuntimeError("must provide dir_path and version")
self.version = version
self.path = os.path.join(dir_path, "scopseq-%s" % version)
astral_file = "astral-scopdom-seqres-all-%s.fa" % self.version
astral_file = os.path.join(self.path, astral_file)
if astral_file:
# Build a dictionary of SeqRecord objects in the FASTA file, IN MEMORY
self.fasta_dict = SeqIO.to_dict(SeqIO.parse(astral_file, "fasta"))
self.astral_file = astral_file
self.EvDatasets = {}
self.EvDatahash = {}
self.IdDatasets = {}
self.IdDatahash = {}
def domainsClusteredByEv(self, id):
"""get domains clustered by evalue"""
if id not in self.EvDatasets:
if self.db_handle:
self.EvDatasets[id] = self.getAstralDomainsFromSQL(astralEv_to_sql[id])
else:
if not self.path:
raise RuntimeError("No scopseq directory specified")
file_prefix = "astral-scopdom-seqres-sel-gs"
filename = "%s-e100m-%s-%s.id" % (file_prefix, astralEv_to_file[id],
self.version)
filename = os.path.join(self.path, filename)
self.EvDatasets[id] = self.getAstralDomainsFromFile(filename)
return self.EvDatasets[id]
def domainsClusteredById(self, id):
"""get domains clustered by percent id"""
if id not in self.IdDatasets:
if self.db_handle:
self.IdDatasets[id] = self.getAstralDomainsFromSQL("id" + str(id))
else:
if not self.path:
raise RuntimeError("No scopseq directory specified")
file_prefix = "astral-scopdom-seqres-sel-gs"
filename = "%s-bib-%s-%s.id" % (file_prefix, id, self.version)
filename = os.path.join(self.path, filename)
self.IdDatasets[id] = self.getAstralDomainsFromFile(filename)
return self.IdDatasets[id]
def getAstralDomainsFromFile(self, filename=None, file_handle=None):
"""Get the scop domains from a file containing a list of sids"""
if file_handle is None and filename is None:
raise RuntimeError("You must provide a filename or handle")
if not file_handle:
file_handle = open(filename)
doms = []
while True:
line = file_handle.readline()
if not line:
break
line = line.rstrip()
doms.append(line)
if filename:
file_handle.close()
doms = [a for a in doms if a[0] == 'd']
doms = [self.scop.getDomainBySid(x) for x in doms]
return doms
def getAstralDomainsFromSQL(self, column):
"""Load a set of astral domains from a column in the astral table of a MYSQL
database (which can be created with writeToSQL(...)"""
cur = self.db_handle.cursor()
cur.execute("SELECT sid FROM astral WHERE " + column + "=1")
data = cur.fetchall()
data = [self.scop.getDomainBySid(x[0]) for x in data]
return data
def getSeqBySid(self, domain):
"""get the seq record of a given domain from its sid"""
if self.db_handle is None:
return self.fasta_dict[domain].seq
else:
cur = self.db_handle.cursor()
cur.execute("SELECT seq FROM astral WHERE sid=%s", domain)
return Seq(cur.fetchone()[0])
def getSeq(self, domain):
"""Return seq associated with domain"""
return self.getSeqBySid(domain.sid)
def hashedDomainsById(self, id):
"""Get domains clustered by sequence identity in a dict"""
if id not in self.IdDatahash:
self.IdDatahash[id] = {}
for d in self.domainsClusteredById(id):
self.IdDatahash[id][d] = 1
return self.IdDatahash[id]
def hashedDomainsByEv(self, id):
"""Get domains clustered by evalue in a dict"""
if id not in self.EvDatahash:
self.EvDatahash[id] = {}
for d in self.domainsClusteredByEv(id):
self.EvDatahash[id][d] = 1
return self.EvDatahash[id]
def isDomainInId(self, dom, id):
"""Returns true if the domain is in the astral clusters for percent ID"""
return dom in self.hashedDomainsById(id)
def isDomainInEv(self, dom, id):
"""Returns true if the domain is in the ASTRAL clusters for evalues"""
return dom in self.hashedDomainsByEv(id)
def writeToSQL(self, db_handle):
"""Write the ASTRAL database to a MYSQL database"""
cur = db_handle.cursor()
cur.execute("DROP TABLE IF EXISTS astral")
cur.execute("CREATE TABLE astral (sid CHAR(8), seq TEXT, PRIMARY KEY (sid))")
for dom in self.fasta_dict:
cur.execute("INSERT INTO astral (sid,seq) values (%s,%s)",
(dom, self.fasta_dict[dom].seq.data))
for i in astralBibIds:
cur.execute("ALTER TABLE astral ADD (id" + str(i) + " TINYINT)")
for d in self.domainsClusteredById(i):
cur.execute("UPDATE astral SET id" + str(i) + "=1 WHERE sid=%s",
d.sid)
for ev in astralEvs:
cur.execute("ALTER TABLE astral ADD (" + astralEv_to_sql[ev] + " TINYINT)")
for d in self.domainsClusteredByEv(ev):
cur.execute("UPDATE astral SET " + astralEv_to_sql[ev] + "=1 WHERE sid=%s",
d.sid)
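# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Assumes a Scop tree and the matching ASTRAL FASTA file are available locally; the
# file name and sid below are hypothetical.
def _example_astral_sequence(scop, fasta_path="astral-scopdom-seqres-all-1.75.fa"):
    astral = Astral(astral_file=fasta_path, scop=scop)
    dom = scop.getDomainBySid("d1tpt_1")
    seq = astral.getSeq(dom)  # looks the sequence up in the in-memory FASTA dict
    print(dom.sid, len(seq))
    return seq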
def search(pdb=None, key=None, sid=None, disp=None, dir=None, loc=None,
cgi='http://scop.mrc-lmb.cam.ac.uk/scop/search.cgi', **keywds):
"""search(pdb=None, key=None, sid=None, disp=None, dir=None, loc=None,
cgi='http://scop.mrc-lmb.cam.ac.uk/scop/search.cgi', **keywds)
Access search.cgi and return a handle to the results. See the
online help file for an explanation of the parameters:
http://scop.mrc-lmb.cam.ac.uk/scop/help.html
Raises an IOError if there's a network error.
"""
params = {'pdb': pdb, 'key': key, 'sid': sid, 'disp': disp,
'dir': dir, 'loc': loc}
variables = {}
for k, v in params.items():
if v is not None:
variables[k] = v
variables.update(keywds)
return _open(cgi, variables)
def _open(cgi, params={}, get=1):
"""_open(cgi, params={}, get=1) -> UndoHandle
Open a handle to SCOP. cgi is the URL for the cgi script to access.
params is a dictionary with the options to pass to it. get is a boolean
that describes whether a GET should be used. Does some
simple error checking, and will raise an IOError if it encounters one.
"""
from Bio._py3k import urlopen, urlencode
# Open a handle to SCOP.
options = urlencode(params)
if get: # do a GET
if options:
cgi += "?" + options
handle = urlopen(cgi)
else: # do a POST
handle = urlopen(cgi, data=options)
return handle
| apache-2.0 | 2,075,764,463,224,599,300 | 34.662192 | 99 | 0.552977 | false |
mcs07/ChemDataExtractor | chemdataextractor/cli/pos.py | 1 | 11619 | # -*- coding: utf-8 -*-
"""
chemdataextractor.cli.pos
~~~~~~~~~~~~~~~~~~~~~~~~~
Part of speech tagging commands.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import click
from ..doc import Document, Text
from ..nlp.corpus import genia_training, wsj_training, wsj_evaluation, genia_evaluation
from ..nlp.pos import TAGS, ChemApPosTagger, ChemCrfPosTagger
log = logging.getLogger(__name__)
@click.group(name='pos')
@click.pass_context
def pos_cli(ctx):
"""POS tagger commands."""
pass
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.pass_context
def train_all(ctx, output):
"""Train POS tagger on WSJ, GENIA, and both. With and without cluster features."""
click.echo('chemdataextractor.pos.train_all')
click.echo('Output: %s' % output)
ctx.invoke(train, output='%s_wsj_nocluster.pickle' % output, corpus='wsj', clusters=False)
ctx.invoke(train, output='%s_wsj.pickle' % output, corpus='wsj', clusters=True)
ctx.invoke(train, output='%s_genia_nocluster.pickle' % output, corpus='genia', clusters=False)
ctx.invoke(train, output='%s_genia.pickle' % output, corpus='genia', clusters=True)
ctx.invoke(train, output='%s_wsj_genia_nocluster.pickle' % output, corpus='wsj+genia', clusters=False)
ctx.invoke(train, output='%s_wsj_genia.pickle' % output, corpus='wsj+genia', clusters=True)
@pos_cli.command()
@click.argument('model', required=True)
@click.pass_context
def evaluate_all(ctx, model):
"""Evaluate POS taggers on WSJ and GENIA."""
click.echo('chemdataextractor.pos.evaluate_all')
click.echo('Model: %s' % model)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True)
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def train(ctx, output, corpus, clusters):
"""Train POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
                    genia_sents[i][j] = (token, '-LRB-') # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(clusters=clusters)
tagger.train(training_corpus, output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def evaluate(ctx, model, corpus, clusters):
"""Evaluate performance of POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
sents[i][j] = (token, '-LRB-')
elif tag == ')':
sents[i][j] = (token, '-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(model=model, clusters=clusters)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('wb'), help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_obj
def train_perceptron(ctx, output, corpus, clusters):
"""Train Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
                    genia_sents[i][j] = (token, '-LRB-') # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(clusters=clusters)
tagger.train(training_corpus)
tagger.save(output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.pass_obj
def evaluate_perceptron(ctx, model, corpus):
"""Evaluate performance of Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == u'-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == u'(':
sents[i][j] = (token, u'-LRB-')
elif tag == u')':
sents[i][j] = (token, u'-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(model=model)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Output file.', default=click.get_text_stream('stdout'))
@click.argument('input', type=click.File('rb'), default=click.get_binary_stream('stdin'))
@click.pass_obj
def tag(ctx, input, output):
"""Output POS-tagged tokens."""
log.info('chemdataextractor.pos.tag')
log.info('Reading %s' % input.name)
doc = Document.from_file(input)
for element in doc.elements:
if isinstance(element, Text):
for sentence in element.sentences:
output.write(u' '.join(u'/'.join([token, tag]) for token, tag in sentence.pos_tagged_tokens))
output.write(u'\n')
| mit | -1,216,581,177,639,905,800 | 42.845283 | 132 | 0.58697 | false |
CollinRooney12/htsprophet | htsprophet/fitForecast.py | 1 | 13224 | # -*- coding: utf-8 -*-
"""
Name: fitForecast.py
Author: Collin Rooney
Last Updated: 7/18/2017
This script will contain functions for all types of hierarchical modeling approaches.
It will use the prophet package as a forecasting tool.
The general idea of it is very similar to the hts package in R, but it is a little
more specific with how the dataframe is put together.
Credit to Rob J. Hyndman and research partners as much of the code was developed with the help of their work
https://www.otexts.org/fpp
https://robjhyndman.com/publications/
Credit to Facebook and their fbprophet package
https://facebookincubator.github.io/prophet/
It was my intention to make some of the code look similar to certain sections in the Prophet and (Hyndman's) hts packages
"""
import pandas as pd
import numpy as np
from fbprophet import Prophet
import contextlib, os
from scipy.special import inv_boxcox
#%%
def fitForecast(y, h, sumMat, nodes, method, freq, include_history, cap, capF, changepoints, n_changepoints, \
yearly_seasonality, weekly_seasonality, daily_seasonality, holidays, seasonality_prior_scale, \
holidays_prior_scale, changepoint_prior_scale, mcmc_samples, interval_width, uncertainty_samples, \
boxcoxT, skipFitting):
forecastsDict = {}
mse = {}
resids = {}
nForecasts = sumMat.shape[0]
##
    # If you have a dictionary of Prophet Dataframes already, skip the prophet part, and put all the values into a dictionary
##
if skipFitting == True:
for key in range(len(y.columns.tolist())-1):
forecastsDict[key] = pd.DataFrame(y.iloc[:,key+1])
forecastsDict[key] = forecastsDict[key].rename(columns = {forecastsDict[key].columns[0] : 'yhat'})
if skipFitting == False:
if method == 'FP':
nForecasts = sum(list(map(sum, nodes)))+1
for node in range(nForecasts):
nodeToForecast = pd.concat([y.iloc[:, [0]], y.iloc[:, node+1]], axis = 1)
if isinstance(cap, pd.DataFrame):
cap1 = cap.iloc[:, node]
else:
cap1 = cap
if isinstance(capF, pd.DataFrame):
cap2 = capF.iloc[:, node]
else:
cap2 = capF
if isinstance(changepoints, pd.DataFrame):
                changepoints1 = changepoints.iloc[:, node]
else:
changepoints1 = changepoints
if isinstance(n_changepoints, list):
n_changepoints1 = n_changepoints[node]
else:
n_changepoints1 = n_changepoints
##
# Put the forecasts into a dictionary of dataframes
##
with contextlib.redirect_stdout(open(os.devnull, "w")):
# Prophet related stuff
nodeToForecast = nodeToForecast.rename(columns = {nodeToForecast.columns[0] : 'ds'})
nodeToForecast = nodeToForecast.rename(columns = {nodeToForecast.columns[1] : 'y'})
if capF is None:
growth = 'linear'
m = Prophet(growth=growth,
changepoints=changepoints1,
n_changepoints=n_changepoints1,
yearly_seasonality=yearly_seasonality,
weekly_seasonality=weekly_seasonality,
daily_seasonality=daily_seasonality,
holidays=holidays,
seasonality_prior_scale=seasonality_prior_scale,
holidays_prior_scale=holidays_prior_scale,
changepoint_prior_scale=changepoint_prior_scale,
mcmc_samples=mcmc_samples,
interval_width=interval_width,
uncertainty_samples=uncertainty_samples)
else:
growth = 'logistic'
m = Prophet(growth=growth,
                                changepoints=changepoints1,
                                n_changepoints=n_changepoints1,
yearly_seasonality=yearly_seasonality,
weekly_seasonality=weekly_seasonality,
daily_seasonality=daily_seasonality,
holidays=holidays,
seasonality_prior_scale=seasonality_prior_scale,
holidays_prior_scale=holidays_prior_scale,
changepoint_prior_scale=changepoint_prior_scale,
mcmc_samples=mcmc_samples,
interval_width=interval_width,
uncertainty_samples=uncertainty_samples)
nodeToForecast['cap'] = cap1
m.fit(nodeToForecast)
future = m.make_future_dataframe(periods = h, freq = freq, include_history = include_history)
if capF is not None:
future['cap'] = cap2
##
# Base Forecasts, Residuals, and MSE
##
forecastsDict[node] = m.predict(future)
resids[node] = y.iloc[:, node+1] - forecastsDict[node].yhat[:-h].values
mse[node] = np.mean(np.array(resids[node])**2)
##
# If logistic use exponential function, so that values can be added correctly
##
if capF is not None:
forecastsDict[node].yhat = np.exp(forecastsDict[node].yhat)
if boxcoxT is not None:
forecastsDict[node].yhat = inv_boxcox(forecastsDict[node].yhat, boxcoxT[node])
forecastsDict[node].trend = inv_boxcox(forecastsDict[node].trend, boxcoxT[node])
if "seasonal" in forecastsDict[node].columns.tolist():
forecastsDict[node].seasonal = inv_boxcox(forecastsDict[node].seasonal, boxcoxT[node])
if "daily" in forecastsDict[node].columns.tolist():
forecastsDict[node].daily = inv_boxcox(forecastsDict[node].daily, boxcoxT[node])
if "weekly" in forecastsDict[node].columns.tolist():
forecastsDict[node].weekly = inv_boxcox(forecastsDict[node].weekly, boxcoxT[node])
if "yearly" in forecastsDict[node].columns.tolist():
forecastsDict[node].yearly = inv_boxcox(forecastsDict[node].yearly, boxcoxT[node])
if "holidays" in forecastsDict[node].columns.tolist():
forecastsDict[node].yearly = inv_boxcox(forecastsDict[node].yearly, boxcoxT[node])
##
# Now, Revise them
##
if method == 'BU' or method == 'AHP' or method == 'PHA':
y1 = y.copy()
nCols = len(list(forecastsDict.keys()))+1
if method == 'BU':
'''
Pros:
No information lost due to aggregation
Cons:
Bottom level data can be noisy and more challenging to model and forecast
'''
hatMat = np.zeros([len(forecastsDict[0].yhat),1])
for key in range(nCols-sumMat.shape[1]-1, nCols-1):
f1 = np.array(forecastsDict[key].yhat)
f2 = f1[:, np.newaxis]
if np.all(hatMat == 0):
hatMat = f2
else:
hatMat = np.concatenate((hatMat, f2), axis = 1)
if method == 'AHP':
'''
Pros:
Creates reliable aggregate forecasts, and good for low count data
Cons:
Unable to capture individual series dynamics
'''
if boxcoxT is not None:
for column in range(len(y.columns.tolist())-1):
y1.iloc[:,column+1] = inv_boxcox(y1.iloc[:, column+1], boxcoxT[column])
##
# Find Proportions
##
fcst = forecastsDict[0].yhat
fcst = fcst[:, np.newaxis]
numBTS = sumMat.shape[1]
btsDat = pd.DataFrame(y1.iloc[:,nCols-numBTS:nCols])
divs = np.divide(np.transpose(np.array(btsDat)),np.array(y1.iloc[:,1]))
props = divs.mean(1)
props = props[:, np.newaxis]
hatMat = np.dot(np.array(fcst),np.transpose(props))
if method == 'PHA':
'''
Pros:
Creates reliable aggregate forecasts, and good for low count data
Cons:
Unable to capture individual series dynamics
'''
if boxcoxT is not None:
for column in range(len(y.columns.tolist())-1):
y1.iloc[:,column+1] = inv_boxcox(y1.iloc[:, column+1], boxcoxT[column])
##
# Find Proportions
##
fcst = forecastsDict[0].yhat
fcst = fcst[:, np.newaxis]
numBTS = sumMat.shape[1]
btsDat = pd.DataFrame(y1.iloc[:,nCols-numBTS:nCols])
btsSum = btsDat.sum(0)
topSum = sum(y1.iloc[:,1])
props = btsSum/topSum
props = props[:, np.newaxis]
hatMat = np.dot(np.array(fcst),np.transpose(props))
newMat = np.empty([hatMat.shape[0],sumMat.shape[0]])
for i in range(hatMat.shape[0]):
newMat[i,:] = np.dot(sumMat, np.transpose(hatMat[i,:]))
if method == 'FP':
newMat = forecastProp(forecastsDict, nodes)
if method == 'OLS' or method == 'WLSS' or method == 'WLSV':
if capF is not None:
print("An error might occur because of how these methods are defined (They can produce negative values). If it does, then please use another method")
newMat = optimalComb(forecastsDict, sumMat, method, mse)
for key in forecastsDict.keys():
values = forecastsDict[key].yhat.values
values = newMat[:,key]
forecastsDict[key].yhat = values
##
# If Logistic fit values with natural log function to revert back to format of input
##
if capF is not None:
forecastsDict[key].yhat = np.log(forecastsDict[key].yhat)
return forecastsDict
#%%
def forecastProp(forecastsDict, nodes):
'''
Cons:
Produces biased revised forecasts even if base forecasts are unbiased
'''
nCols = len(list(forecastsDict.keys()))+1
##
# Find proportions of forecast at each step ahead, and then alter forecasts
##
levels = len(nodes)
column = 0
firstNode = 1
newMat = np.empty([len(forecastsDict[0].yhat),nCols - 1])
newMat[:,0] = forecastsDict[0].yhat
lst = [x for x in range(nCols-1)]
for level in range(levels):
nodesInLevel = len(nodes[level])
foreSum = 0
for node in range(nodesInLevel):
numChild = nodes[level][node]
lastNode = firstNode + numChild
lst = [x for x in range(firstNode, lastNode)]
baseFcst = np.array([forecastsDict[k].yhat[:] for k in lst])
foreSum = np.sum(baseFcst, axis = 0)
foreSum = foreSum[:, np.newaxis]
if column == 0:
revTop = np.array(forecastsDict[column].yhat)
revTop = revTop[:, np.newaxis]
else:
revTop = np.array(newMat[:,column])
revTop = revTop[:, np.newaxis]
newMat[:,firstNode:lastNode] = np.divide(np.multiply(np.transpose(baseFcst), revTop), foreSum)
column += 1
firstNode += numChild
return newMat
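# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A toy 2-level hierarchy (total = A + B) showing how the summing matrix maps
# bottom-level forecasts to every node, which is the operation used above.
def _example_summing_matrix():
    sumMat = np.array([[1, 1],   # total
                       [1, 0],   # series A
                       [0, 1]])  # series B
    bottom_forecasts = np.array([3.0, 4.0])        # forecasts for A and B
    all_levels = np.dot(sumMat, bottom_forecasts)  # -> array([7., 3., 4.])
    return all_levels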
#%%
def optimalComb(forecastsDict, sumMat, method, mse):
hatMat = np.zeros([len(forecastsDict[0].yhat),1])
for key in forecastsDict.keys():
f1 = np.array(forecastsDict[key].yhat)
f2 = f1[:, np.newaxis]
if np.all(hatMat == 0):
hatMat = f2
else:
hatMat = np.concatenate((hatMat, f2), axis = 1)
##
# Multiply the Summing Matrix Together S*inv(S'S)*S'
##
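    # Added note (illustrative, not from the original code): for OLS the matrix
    # built below is the projection P = S (S'S)^{-1} S'. For example, with
    # S = [[1,1],[1,0],[0,1]] (a total above two bottom series), S'S = [[2,1],[1,2]],
    # and P maps any incoherent forecast vector to the closest (least-squares)
    # vector that respects the hierarchy.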
if method == "OLS":
optiMat = np.dot(np.dot(sumMat, np.linalg.inv(np.dot(np.transpose(sumMat), sumMat))),np.transpose(sumMat))
if method == "WLSS":
diagMat = np.diag(np.transpose(np.sum(sumMat, axis = 1)))
optiMat = np.dot(np.dot(np.dot(sumMat, np.linalg.inv(np.dot(np.dot(np.transpose(sumMat), np.linalg.inv(diagMat)), sumMat))), np.transpose(sumMat)), np.linalg.inv(diagMat))
if method == "WLSV":
diagMat = [mse[key] for key in mse.keys()]
diagMat = np.diag(np.flip(np.hstack(diagMat)+0.0000001, 0))
optiMat = np.dot(np.dot(np.dot(sumMat, np.linalg.inv(np.dot(np.dot(np.transpose(sumMat), np.linalg.inv(diagMat)), sumMat))), np.transpose(sumMat)), np.linalg.inv(diagMat))
newMat = np.empty([hatMat.shape[0],sumMat.shape[0]])
for i in range(hatMat.shape[0]):
newMat[i,:] = np.dot(optiMat, np.transpose(hatMat[i,:]))
return newMat | mit | 7,184,139,017,834,954,000 | 45.080139 | 179 | 0.548851 | false |
tommasoberlose/p2p_bittorrent | SocketFunc.py | 1 | 1263 | import socket
####### SOCKET
def create_socket_server(myHost, port):
s = None
for res in socket.getaddrinfo(None, int(port), socket.AF_UNSPEC,socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.bind(sa)
s.listen(1000)
except socket.error as msg:
s.close()
s = None
continue
break
return s
def create_socket_client(myHost, port):
s = None
for res in socket.getaddrinfo(myHost, int(port), socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.connect(sa)
except socket.error as msg:
s.close()
s = None
continue
break
return s
def forward(pk, addr, l):
if pk != bytes(const.ERROR_PKT, "ascii"):
for x in l:
if addr != x[0]:
s = func.create_socket_client(func.roll_the_dice(x[0]), x[1])
if not(s is None):
s.sendall(pk)
#write_daemon_success("Daemon", "-", "Forward da " + addr + " a " + x[0])
s.close()
| mit | -3,098,031,255,870,166,000 | 24.26 | 107 | 0.585115 | false |
ruleant/weblate | weblate/trans/__init__.py | 1 | 1568 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from south.signals import post_migrate
from django.dispatch import receiver
@receiver(post_migrate)
def create_permissions_compat(app, **kwargs):
'''
Creates permissions like syncdb would if we were not using South
See http://south.aeracode.org/ticket/211
'''
from django.db.models import get_app, get_models
from django.conf import settings
from django.contrib.auth.management import create_permissions
if app in ('trans', 'lang', 'accounts'):
try:
create_permissions(
get_app(app), get_models(), 2 if settings.DEBUG else 0
)
except AttributeError as error:
# See https://code.djangoproject.com/ticket/20442
print 'Failed to create permission objects: {0}'.format(error)
| gpl-3.0 | 193,900,625,314,900,740 | 36.261905 | 74 | 0.701597 | false |
carlitux/turboengine | src/turboengine/__init__.py | 1 | 1702 | ##################################################################################
# The MIT License - turboengine
#
# Copyright (c) Oct 2010 - Luis C. Cruz <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##################################################################################
__version__ = "0.0.1"
__author__ = "Luis C. Cruz <[email protected]>"
def register_templatetags():
""" Register templatetags defined in settings as basic templatetags """
from turboengine.conf import settings
from google.appengine.ext.webapp import template
for python_file in settings.TEMPLATE_PATH:
template.register_template_library(python_file)
| mit | 6,363,081,894,837,103,000 | 50.575758 | 82 | 0.686839 | false |
nlgndnmz/ctpsingle | BeyerHedetmieni.py | 1 | 3481 |
import sys
# G is the gamma matrix
# par is the parent array
# n is the number of nodes
def writeGammaMatrix(gammaFile, G, par, n):
for i in range(n):
for j in range(n):
G[i][j] = 0
for i in range(n):
G[i][i] = 1
j = par[i]-1
while j > -1:
G[j][i] = 1
j = par[j]-1
for i in range(n):
for j in range(n):
gammaFile.write(str(G[i][j]) + ' ')
gammaFile.write('\n')
gammaFile.write('\n')
# par is the parent array
# n is the number of nodes
def writeAdjMatrix(adjFile, par, n):
adjFile.write(str(n-1)) # number of edges
for i in range(1, n):
adjFile.write(' ' + str(par[i]-1) + ' ' + str(i) + ' ')
adjFile.write('\n');
# writes a dot file to be processed with Graphviz
def writeDotFile(par, n, num):
dotFile = open('./GammaAdjMatrices/dotFile' + str(n) + '_' + str(num-1) + '.dot', 'w')
dotFile.write('digraph G { \n')
for i in range(1, n):
dotFile.write(str(par[i]-1) + ' -> ' + str(i) + ';\n')
dotFile.write('}\n')
dotFile.close()
# n is the number of nodes
# k is the max number of children allowed per node
def getRootedTrees(adjFile, gammaFile, n, k, writeDots):
num = 0
L = []
par = []
levels = []
children = []
G = []
p = n-1
q = 0
for i in range(n):
L.append(i)
par.append(i)
levels.append(i+1)
children.append(0)
G.append([])
for j in range(n):
G[i].append(0)
while (p > 0):
for i in range(n):
children[i] = 0
for i in range(n):
children[par[i]] += 1
if max(children) <= k:
num += 1
writeAdjMatrix(adjFile, par, n)
writeGammaMatrix(gammaFile, G, par, n)
if writeDots:
writeDotFile(par, n, num)
p = 0
for i in range(n-1, -1, -1):
if L[i] > 1:
p = i
break
if p == 0:
break
for i in range(p-1, -1, -1):
if L[i] == L[p] - 1:
q = i
break
for i in range(p, n):
L[i] = L[i-p+q]
for i in range(1, n):
x = L[i]
par[i] = levels[x-1]
levels[x] = i+1
# n is the number of nodes
# k is the max number of children allowed per node
def getNumTrees(n, k):
num = 0
L = []
par = []
levels = []
children = []
p = n-1
q = 0
for i in range(n):
L.append(i)
par.append(i)
levels.append(i+1)
children.append(0)
while (p > 0):
for i in range(n):
children[i] = 0
for i in range(n):
children[par[i]] += 1
if max(children) <= k:
num += 1
p = 0
for i in range(n-1, -1, -1):
if L[i] > 1:
p = i
break
if p == 0:
break
for i in range(p-1, -1, -1):
if L[i] == L[p] - 1:
q = i
break
for i in range(p, n):
L[i] = L[i-p+q]
for i in range(1, n):
x = L[i]
par[i] = levels[x-1]
levels[x] = i+1
return num
if __name__ == "__main__":
if len(sys.argv) < 2:
print 'Usage: python', sys.argv[0], '<max_num_nodes> [<max_branching_factor>]'
sys.exit(0)
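    # Example invocation (added for illustration, not part of the original script):
    #   python BeyerHedetmieni.py 5 2
    # enumerates all rooted trees on up to 5 nodes with at most 2 children per
    # node and writes the adjacency and Gamma matrices under ./GammaAdjMatrices/.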
maxNumNodes = 1 + int(sys.argv[1])
k = maxNumNodes
if len(sys.argv) == 3:
k = int(sys.argv[2])
for i in range(2, maxNumNodes):
x = getNumTrees(i, k)
print 'Number of trees with ', i, ' nodes: ', x
if x > 100000:
            print 'Sorry, that is too many trees to write to file. Aborting now; already written files are OK to use.'
sys.exit(3)
adjFile = open('./GammaAdjMatrices/AdjacencyMatrix' + str(i) + '.txt', 'w')
gammaFile = open('./GammaAdjMatrices/GammaMatrix' + str(i) + '.txt', 'w')
adjFile.write(str(i) + ' ' + str(x) + '\n\n')
gammaFile.write(str(i) + ' ' + str(x) + '\n')
getRootedTrees(adjFile, gammaFile, i, k, False)
adjFile.close()
gammaFile.close()
| gpl-3.0 | 5,346,986,811,560,547,000 | 17.918478 | 109 | 0.561046 | false |
fzimmermann89/pyload | module/plugins/crypter/DevhostStFolder.py | 1 | 2306 | # -*- coding: utf-8 -*-
#
# Test links:
# http://d-h.st/users/shine/?fld_id=37263#files
import re
import urlparse
from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo
class DevhostStFolder(SimpleCrypter):
__name__ = "DevhostStFolder"
__type__ = "crypter"
__version__ = "0.08"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?d-h\.st/users/(?P<USER>\w+)(/\?fld_id=(?P<ID>\d+))?'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium" , "bool", "Use premium account if available" , True),
("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description__ = """D-h.st folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("zapp-brannigan", "[email protected]"),
("Walter Purcaro", "[email protected]")]
LINK_PATTERN = r'(?:/> |;">)<a href="(.+?)"(?!>Back to \w+<)'
OFFLINE_PATTERN = r'"/cHP">test\.png<'
def check_name_size(self, getinfo=True):
if not self.info or getinfo:
self.log_debug("File info (BEFORE): %s" % self.info)
self.info.update(self.get_info(self.pyfile.url, self.html))
self.log_debug("File info (AFTER): %s" % self.info)
try:
if self.info['pattern']['ID'] == "0":
raise
p = r'href="(.+?)">Back to \w+<'
m = re.search(p, self.html)
html = self.load(urlparse.urljoin("http://d-h.st/", m.group(1)),
cookies=False)
p = '\?fld_id=%s.*?">(.+?)<' % self.info['pattern']['ID']
m = re.search(p, html)
self.pyfile.name = m.group(1)
except Exception, e:
self.log_debug(e, trace=True)
self.pyfile.name = self.info['pattern']['USER']
try:
folder = self.info['folder'] = self.pyfile.name
except Exception:
pass
self.log_debug("File name: %s" % self.pyfile.name,
"File folder: %s" % self.pyfile.name)
getInfo = create_getInfo(DevhostStFolder)
| gpl-3.0 | 6,376,989,188,903,538,000 | 33.41791 | 95 | 0.506071 | false |
neerja28/Tempest | tempest/api/compute/servers/test_servers_negative.py | 1 | 21169 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
import testtools
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
credentials = ['primary', 'alt']
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
try:
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
def tearDown(self):
self.server_check_teardown()
super(ServersNegativeTestJSON, self).tearDown()
@classmethod
def setup_clients(cls):
super(ServersNegativeTestJSON, cls).setup_clients()
cls.client = cls.servers_client
cls.alt_client = cls.os_alt.servers_client
@classmethod
def resource_setup(cls):
super(ServersNegativeTestJSON, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@test.attr(type=['negative'])
@test.idempotent_id('dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf')
def test_server_name_blank(self):
# Create a server with name parameter empty
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name='')
@test.attr(type=['negative'])
@test.idempotent_id('b8a7235e-5246-4a8f-a08e-b34877c6586f')
def test_personality_file_contents_not_encoded(self):
# Use an unencoded file when creating a server with personality
file_contents = 'This is a test file.'
person = [{'path': '/etc/testfile.txt',
'contents': file_contents}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
personality=person)
@test.attr(type=['negative'])
@test.idempotent_id('fcba1052-0a50-4cf3-b1ac-fae241edf02f')
def test_create_with_invalid_image(self):
# Create a server with an unknown image
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
image_id=-1)
@test.attr(type=['negative'])
@test.idempotent_id('18f5227f-d155-4429-807c-ccb103887537')
def test_create_with_invalid_flavor(self):
# Create a server with an unknown flavor
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
flavor=-1,)
@test.attr(type=['negative'])
@test.idempotent_id('7f70a4d1-608f-4794-9e56-cb182765972c')
def test_invalid_access_ip_v4_address(self):
# An access IPv4 address must match a valid address pattern
IPv4 = '1.1.1.1.1.1'
self.assertRaises(lib_exc.BadRequest,
self.create_test_server, accessIPv4=IPv4)
@test.attr(type=['negative'])
@test.idempotent_id('5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0')
def test_invalid_ip_v6_address(self):
# An access IPv6 address must match a valid address pattern
IPv6 = 'notvalid'
self.assertRaises(lib_exc.BadRequest,
self.create_test_server, accessIPv6=IPv6)
@test.idempotent_id('7ea45b3e-e770-46fa-bfcc-9daaf6d987c0')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative'])
def test_resize_nonexistent_server(self):
# Resize a non-existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.resize,
nonexistent_server, self.flavor_ref)
@test.idempotent_id('ced1a1d7-2ab6-45c9-b90f-b27d87b30efd')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative'])
def test_resize_server_with_non_existent_flavor(self):
# Resize a server with non-existent flavor
nonexistent_flavor = data_utils.rand_uuid()
self.assertRaises(lib_exc.BadRequest, self.client.resize,
self.server_id, flavor_ref=nonexistent_flavor)
@test.idempotent_id('45436a7d-a388-4a35-a9d8-3adc5d0d940b')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative'])
def test_resize_server_with_null_flavor(self):
# Resize a server with null flavor
self.assertRaises(lib_exc.BadRequest, self.client.resize,
self.server_id, flavor_ref="")
@test.attr(type=['negative'])
@test.idempotent_id('d4c023a0-9c55-4747-9dd5-413b820143c7')
def test_reboot_non_existent_server(self):
# Reboot a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.reboot,
nonexistent_server, 'SOFT')
@test.idempotent_id('d1417e7f-a509-41b5-a102-d5eed8613369')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(lib_exc.Conflict,
self.client.pause_server,
self.server_id)
self.client.unpause_server(self.server_id)
@test.attr(type=['negative'])
@test.idempotent_id('98fa0458-1485-440f-873b-fe7f0d714930')
def test_rebuild_reboot_deleted_server(self):
# Rebuild and Reboot a deleted server
server = self.create_test_server()
self.client.delete_server(server['id'])
self.client.wait_for_server_termination(server['id'])
self.assertRaises(lib_exc.NotFound,
self.client.rebuild,
server['id'], self.image_ref_alt)
self.assertRaises(lib_exc.NotFound, self.client.reboot,
server['id'], 'SOFT')
@test.attr(type=['negative'])
@test.idempotent_id('d86141a7-906e-4731-b187-d64a2ea61422')
def test_rebuild_non_existent_server(self):
# Rebuild a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.rebuild,
nonexistent_server,
self.image_ref_alt)
@test.attr(type=['negative'])
@test.idempotent_id('fd57f159-68d6-4c2a-902b-03070828a87e')
def test_create_numeric_server_name(self):
server_name = 12345
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative'])
@test.idempotent_id('c3e0fb12-07fc-4d76-a22e-37409887afe8')
def test_create_server_name_length_exceeds_256(self):
# Create a server with name length exceeding 256 characters
server_name = 'a' * 256
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative'])
@test.idempotent_id('4e72dc2d-44c5-4336-9667-f7972e95c402')
def test_create_with_invalid_network_uuid(self):
# Pass invalid network uuid while creating a server
networks = [{'fixed_ip': '10.0.1.1', 'uuid': 'a-b-c-d-e-f-g-h-i-j'}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
networks=networks)
@test.attr(type=['negative'])
@test.idempotent_id('7a2efc39-530c-47de-b875-2dd01c8d39bd')
def test_create_with_non_existent_keypair(self):
# Pass a non-existent keypair while creating a server
key_name = data_utils.rand_name('key')
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
key_name=key_name)
@test.attr(type=['negative'])
@test.idempotent_id('7fc74810-0bd2-4cd7-8244-4f33a9db865a')
def test_create_server_metadata_exceeds_length_limit(self):
# Pass really long metadata while creating a server
metadata = {'a': 'b' * 260}
self.assertRaises((lib_exc.BadRequest, lib_exc.OverLimit),
self.create_test_server,
meta=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('aa8eed43-e2cb-4ebf-930b-da14f6a21d81')
def test_update_name_of_non_existent_server(self):
# Update name of a non-existent server
server_name = data_utils.rand_name('server')
new_name = data_utils.rand_name('server') + '_updated'
self.assertRaises(lib_exc.NotFound, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative'])
@test.idempotent_id('38204696-17c6-44da-9590-40f87fb5a899')
def test_update_server_set_empty_name(self):
# Update name of the server to an empty string
server_name = data_utils.rand_name('server')
new_name = ''
self.assertRaises(lib_exc.BadRequest, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative'])
@test.idempotent_id('543d84c1-dd2e-4c6d-8cb2-b9da0efaa384')
def test_update_server_of_another_tenant(self):
# Update name of a server that belongs to another tenant
new_name = self.server_id + '_new'
self.assertRaises(lib_exc.NotFound,
self.alt_client.update_server, self.server_id,
name=new_name)
@test.attr(type=['negative'])
@test.idempotent_id('5c8e244c-dada-4590-9944-749c455b431f')
def test_update_server_name_length_exceeds_256(self):
# Update name of server exceed the name length limit
new_name = 'a' * 256
self.assertRaises(lib_exc.BadRequest,
self.client.update_server,
self.server_id,
name=new_name)
@test.attr(type=['negative'])
@test.idempotent_id('1041b4e6-514b-4855-96a5-e974b60870a3')
def test_delete_non_existent_server(self):
# Delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.delete_server,
nonexistent_server)
@test.attr(type=['negative'])
@test.idempotent_id('5c75009d-3eea-423e-bea3-61b09fd25f9c')
def test_delete_a_server_of_another_tenant(self):
# Delete a server that belongs to another tenant
self.assertRaises(lib_exc.NotFound,
self.alt_client.delete_server,
self.server_id)
@test.attr(type=['negative'])
@test.idempotent_id('75f79124-277c-45e6-a373-a1d6803f4cc4')
def test_delete_server_pass_negative_id(self):
# Pass an invalid string parameter to delete server
self.assertRaises(lib_exc.NotFound, self.client.delete_server, -1)
@test.attr(type=['negative'])
@test.idempotent_id('f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5')
def test_delete_server_pass_id_exceeding_length_limit(self):
# Pass a server ID that exceeds length limit to delete server
self.assertRaises(lib_exc.NotFound, self.client.delete_server,
sys.maxint + 1)
@test.attr(type=['negative'])
@test.idempotent_id('c5fa6041-80cd-483b-aa6d-4e45f19d093c')
def test_create_with_nonexistent_security_group(self):
# Create a server with a nonexistent security group
security_groups = [{'name': 'does_not_exist'}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
security_groups=security_groups)
@test.attr(type=['negative'])
@test.idempotent_id('3436b02f-1b1e-4f03-881e-c6a602327439')
def test_get_non_existent_server(self):
# Get a non existent server details
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.get_server,
nonexistent_server)
@test.attr(type=['negative'])
@test.idempotent_id('a31460a9-49e1-42aa-82ee-06e0bb7c2d03')
def test_stop_non_existent_server(self):
# Stop a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.servers_client.stop,
nonexistent_server)
@test.idempotent_id('6a8dc0c6-6cd4-4c0a-9f32-413881828091')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative'])
def test_pause_non_existent_server(self):
# pause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.pause_server,
nonexistent_server)
@test.idempotent_id('705b8e3a-e8a7-477c-a19b-6868fc24ac75')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative'])
def test_unpause_non_existent_server(self):
# unpause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.unpause_server,
nonexistent_server)
@test.idempotent_id('c8e639a7-ece8-42dd-a2e0-49615917ba4f')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative'])
def test_unpause_server_invalid_state(self):
# unpause an active server.
self.assertRaises(lib_exc.Conflict,
self.client.unpause_server,
self.server_id)
@test.idempotent_id('d1f032d5-7b6e-48aa-b252-d5f16dd994ca')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative'])
def test_suspend_non_existent_server(self):
# suspend a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.suspend_server,
nonexistent_server)
@test.idempotent_id('7f323206-05a9-4bf8-996b-dd5b2036501b')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative'])
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
self.client.suspend_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(lib_exc.Conflict,
self.client.suspend_server,
self.server_id)
self.client.resume_server(self.server_id)
@test.idempotent_id('221cd282-bddb-4837-a683-89c2487389b6')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative'])
def test_resume_non_existent_server(self):
# resume a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.resume_server,
nonexistent_server)
@test.idempotent_id('ccb6294d-c4c9-498f-8a43-554c098bfadb')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative'])
def test_resume_server_invalid_state(self):
# resume an active server.
self.assertRaises(lib_exc.Conflict,
self.client.resume_server,
self.server_id)
@test.attr(type=['negative'])
@test.idempotent_id('7dd919e7-413f-4198-bebb-35e2a01b13e9')
def test_get_console_output_of_non_existent_server(self):
# get the console output for a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.get_console_output,
nonexistent_server, 10)
@test.attr(type=['negative'])
@test.idempotent_id('6f47992b-5144-4250-9f8b-f00aa33950f3')
def test_force_delete_nonexistent_server_id(self):
# force-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.force_delete_server,
nonexistent_server)
@test.attr(type=['negative'])
@test.idempotent_id('9c6d38cc-fcfb-437a-85b9-7b788af8bf01')
def test_restore_nonexistent_server_id(self):
# restore-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.restore_soft_deleted_server,
nonexistent_server)
@test.attr(type=['negative'])
@test.idempotent_id('7fcadfab-bd6a-4753-8db7-4a51e51aade9')
def test_restore_server_invalid_state(self):
# we can only restore-delete a server in 'soft-delete' state
self.assertRaises(lib_exc.Conflict,
self.client.restore_soft_deleted_server,
self.server_id)
@test.idempotent_id('abca56e2-a892-48ea-b5e5-e07e69774816')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative'])
def test_shelve_non_existent_server(self):
# shelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.shelve_server,
nonexistent_server)
@test.idempotent_id('443e4f9b-e6bf-4389-b601-3a710f15fddd')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative'])
def test_shelve_shelved_server(self):
# shelve a shelved server.
self.client.shelve_server(self.server_id)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
self.client.wait_for_server_status(self.server_id,
'SHELVED_OFFLOADED',
extra_timeout=offload_time)
else:
self.client.wait_for_server_status(self.server_id,
'SHELVED')
server = self.client.get_server(self.server_id)
image_name = server['name'] + '-shelved'
params = {'name': image_name}
images = self.images_client.list_images(**params)
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
self.assertRaises(lib_exc.Conflict,
self.client.shelve_server,
self.server_id)
self.client.unshelve_server(self.server_id)
@test.idempotent_id('23d23b37-afaf-40d7-aa5d-5726f82d8821')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.unshelve_server,
nonexistent_server)
@test.idempotent_id('8f198ded-1cca-4228-9e65-c6b449c54880')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative'])
def test_unshelve_server_invalid_state(self):
# unshelve an active server.
self.assertRaises(lib_exc.Conflict,
self.client.unshelve_server,
self.server_id)
| apache-2.0 | 8,421,193,079,053,598,000 | 40.835968 | 78 | 0.617318 | false |
dbousque/lymp | srcs/lymp.py | 1 | 6462 |
from time import time
from struct import pack, unpack
import bson, sys, os, codecs
from random import randint
from traceback import print_exc
def int_to_int64_bytes(i):
return pack('>q', i)
def py_to_bson(val):
if type(val) is int:
return bson.int64.Int64(val)
if sys.version_info.major == 2 and type(val) is str:
return bson.binary.Binary(val)
return val
def exit_lymp():
# closing 'python_log'
sys.stdout.close()
exit(0)
# A communication class. It could be implemented using other IPC methods;
# it only needs to provide the methods 'send_bytes' and 'get_bytes'.
class PipeReaderWriter:
def __init__(self, read_pipe_name, write_pipe_name):
self.get_pipes(read_pipe_name, write_pipe_name)
def get_pipes(self, read_pipe_name, write_pipe_name):
# Order of open matters, since it is blocking, should match OCaml order
# 0 to be unbuffered, so we don't have to flush (better performance ?)
self.write_pipe = open(write_pipe_name, 'wb', 0)
self.read_pipe = open(read_pipe_name, 'rb', 0)
def send_bytes(self, byts):
# '>q' to force signed 8 bytes integer
self.write_pipe.write(pack('>q', len(byts)))
#self.write_pipe.flush()
self.write_pipe.write(byts)
#self.write_pipe.flush()
def get_bytes(self):
# '>q' to force signed 8 bytes integer
try:
nb_bytes = unpack('>q', self.read_pipe.read(8))[0]
except:
# ocaml process has been terminated
exit_lymp()
byts = b'' if sys.version_info.major == 3 else ""
while len(byts) < nb_bytes:
byts += self.read_pipe.read(nb_bytes)
return byts
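# Added illustration (not part of the original file): the framing used above is
# an 8-byte big-endian signed length followed by the payload, so sending b"abc"
# writes pack('>q', 3) + b"abc", i.e. b'\x00\x00\x00\x00\x00\x00\x00\x03abc'.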
class ExecutionHandler:
to_ret_types = {
int: "i",
tuple: "t",
list: "l",
str: "s",
float: "f",
type(None): "n",
bool: "b",
bytes: "B"
}
# for python 2, unicode is str and str is bytes
if sys.version_info.major == 2:
to_ret_types[unicode] = "s"
to_ret_types[str] = "B"
def __init__(self, reader_writer):
self.reader_writer = reader_writer
self.modules = {}
self.objs = {}
self.ref_nb = 0
def loop(self):
# don't recursively call .loop, to avoid stack overflow
while True:
command_bytes = self.reader_writer.get_bytes()
if command_bytes == b'done':
exit_lymp()
instruction = bson.BSON.decode(bson.BSON(command_bytes))
try:
ret = self.execute_instruction(instruction)
# data may still be in the buffer
sys.stdout.flush()
self.send_ret(ret, ret_ref=("R" in instruction))
except BaseException as e:
# exception whilst executing, inform ocaml side
print_exc()
# data may still be in the buffer
sys.stdout.flush()
self.send_ret("", exception=True)
def ret_to_msg(self, ret, ret_ref):
msg = {}
# reference (type not supported or explicitely asked to)
if ret_ref or (type(ret) not in self.to_ret_types):
self.ref_nb += 1
self.objs[self.ref_nb] = ret
msg["t"] = "r"
msg["v"] = bson.code.Code(str(self.ref_nb))
else:
msg["t"] = self.to_ret_types[type(ret)]
# tuples are just like lists, but their type "t" is "t" instead of "l"
if type(ret) is tuple:
ret = list(ret)
# if type is list, further resolve
if type(ret) is list:
msg["v"] = []
for elt in ret:
# ret_ref is false here (would not be in the else otherwise)
msg["v"].append(self.ret_to_msg(elt, False))
else:
msg["v"] = py_to_bson(ret)
return msg
def send_ret(self, ret, exception=False, ret_ref=False):
if exception:
msg = {}
msg["t"] = "e"
msg["v"] = ""
else:
msg = self.ret_to_msg(ret, ret_ref)
msg = bytes(bson.BSON.encode(msg))
self.reader_writer.send_bytes(msg)
def resolve_args(self, args):
named = {}
i = 0
for arg in args:
# resolve named args (list of size 2, first one being a bson.code.Code starting with "!")
if type(arg) is list and len(arg) == 2 and type(arg[0]) is bson.code.Code and str(arg[0])[0] == "!":
named[str(arg[0])[1:]] = self.resolve_args([arg[1]])[0][0]
del args[i]
continue
# if bytes
if type(arg) is bson.binary.Binary:
args[i] = bytes(arg)
# resolve reference args (using bson jscode)
if type(arg) is bson.code.Code:
args[i] = self.objs[int(arg)]
if type(arg) is bson.int64.Int64:
args[i] = int(arg)
# for python 2, if arg is str, convert to unicode
if sys.version_info.major == 2 and type(arg) is str:
args[i] = args[i].decode('utf-8')
# for python 2, if arg is bytes, convert to str
if sys.version_info.major == 2 and type(arg) is bson.binary.Binary:
args[i] = str(arg)
# if we have a list, we must recursively resolve
if type(arg) is list:
args[i] = self.resolve_args(arg)[0]
# if we have a dict, it is a tuple inside "v"
if type(arg) is dict:
args[i] = tuple(self.resolve_args(arg["v"])[0])
i += 1
return args, named
def execute_instruction(self, instruction):
if "r" in instruction:
# if we are asked to realease an inexisting of already released reference
if "d" in instruction and instruction["r"] not in self.objs:
return None
# module is the object referenced, later we call getattr to get the method called
module = self.objs[instruction["r"]]
# if we were asked to 'detach' (release) the reference
if "d" in instruction:
del self.objs[instruction["r"]]
return None
# if we were asked to return the reference
if "g" in instruction:
return module
else:
# python 2 builtin module has a different name
if sys.version_info.major == 2 and instruction["m"] == "builtins":
instruction["m"] = "__builtin__"
if instruction["m"] not in self.modules:
__import__(instruction["m"])
self.modules[instruction["m"]] = sys.modules[instruction["m"]]
module = self.modules[instruction["m"]]
# set attribute
if "s" in instruction:
args, named = self.resolve_args(instruction["a"])
arg = args[0]
setattr(module, instruction["f"], arg)
return None
func_or_attr = getattr(module, instruction["f"])
# get attribute
if "t" in instruction:
return func_or_attr
args = instruction["a"]
args, named = self.resolve_args(args)
ret = func_or_attr(*args, **named)
return ret
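# Added illustration (not part of the original file): rough shape of the BSON
# instruction documents handled by execute_instruction above. A module call such
# as os.path.join("a", "b") arrives roughly as
#   {"m": "os.path", "f": "join", "a": ["a", "b"]}
# while a call on a previously returned reference (here reference 42) looks like
#   {"r": 42, "f": "some_method", "a": [...]}
# (field names as used in the code above; the concrete values are hypothetical).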
working_directory = sys.argv[1]
write_pipe_path = sys.argv[2]
read_pipe_path = sys.argv[3]
# changing dir
os.chdir(working_directory)
sys.path.insert(0, working_directory)
# redirect stdout to 'python_log'
sys.stdout = codecs.open('python_log', 'w', encoding='utf-8')
sys.stderr = sys.stdout
communication = PipeReaderWriter(read_pipe_path, write_pipe_path)
handler = ExecutionHandler(communication)
handler.loop()
| mit | -693,447,540,580,453,000 | 29.625592 | 103 | 0.656608 | false |
johnjohnlin/nicotb | sim/standalone/test_semaphore.py | 1 | 1602 | #!/usr/bin/env python
# Copyright (C) 2017,2019, Yu Sheng Lin, [email protected]
# This file is part of Nicotb.
# Nicotb is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Nicotb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Nicotb. If not, see <http://www.gnu.org/licenses/>.
from nicotb import *
from nicotb.utils import Scoreboard
from nicotb.primitives import Semaphore
from nicotb.event import waiting_coro
import numpy as np
ACQ = [1,2,3,4]
SUM = sum(ACQ)
def producer():
global resource
for i in range(SUM):
for j in range(1+np.random.randint(20)):
yield ck_ev
resource += 1
yield from sem.Release()
def consumer():
global resource
scb = Scoreboard("Semaphore")
tst = scb.GetTest("test")
tst.Expect([])
for i in ACQ:
for j in range(1+np.random.randint(10)):
yield ck_ev
yield from sem.Acquire(i)
resource -= i
assert resource >= 0
tst.Get([])
scb.ReportAll()
ck_ev = CreateEvent()
resource = 0
sem = Semaphore(-1)
RegisterCoroutines([
producer(),
consumer(),
])
for i in range(1000):
SignalEvent(ck_ev)
MainLoop()
if not waiting_coro[ck_ev]:
break
print("Simulation stop at {}".format(i))
| gpl-3.0 | 9,214,477,608,298,877,000 | 24.83871 | 72 | 0.721598 | false |
mkauppila/rachel | bot/parse.py | 1 | 1792 | import irc
def parse_nick(nick):
""" Separates nick from the mode characters.
Examples:
	parse_nick('@_markus') => ('_markus', 'o')
parse_nick('+_markus') => ('_markus', 'v')
"""
converter = {'@' : 'o', '+' : 'v'}
modes = converter.keys()
first_character = nick[0]
if first_character in modes:
return (nick[1:], converter[first_character])
else:
return (nick, None)
def parse_nick_from_prefix(prefix):
""" Parse nick from the beginning of message prefix
Used by JOIN and PART message handlers.
"""
end_index = prefix.find('!')
return prefix[0:end_index]
def parse_messages_from(data):
""" Separate server messages
"""
return data.split('\r\n')
def parse_message(message):
""" Parse messages from IRC server.
Message format is:
[:prefix] command [[param1] param2] [:trailing]
Only command is mandatory, other parts are optional.
Args:
Message: Server message that'll be parsed
Returns:
Message object containing the parsed information.
"""
if not message or message == '':
return None
prefix, command, params, trailing = None, None, None, None
# parse prefix
if message[0] == ':':
end_index = message.find(' ')
prefix = message[1:end_index]
# remove the parsed section of the message and the whitespace
message = message[end_index + 1:]
# parse trailing
start_index_of_trailing = message.find(':')
if start_index_of_trailing != -1: # has trailing
trailing = message[start_index_of_trailing + 1:]
# update the message, only command and params left
message = message[0:start_index_of_trailing]
# remove redundant white space
message = message.strip(' ')
command_and_params = message.split(' ')
command = command_and_params[0]
params = command_and_params[1:]
return irc.Message(prefix, command, params, trailing)
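# Example added for illustration (not part of the original module), assuming the
# irc.Message constructor used above:
#   parse_message(':[email protected] PRIVMSG #chan :hello')
#   -> prefix='[email protected]', command='PRIVMSG', params=['#chan'], trailing='hello'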
| mit | -63,711,807,403,173,780 | 23.547945 | 63 | 0.682478 | false |
ImmaculateObsession/nest | pebbles/forms.py | 1 | 3224 | from django import forms
from django.utils.text import slugify
from suit_redactor.widgets import RedactorWidget
from pebbles.models import (
Pebble,
PebblePage,
)
class PebblePageForm(forms.Form):
title = forms.CharField(
max_length=140,
required=True,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
slug = forms.SlugField(
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
is_live = forms.BooleanField(initial=False, required=False)
standalone = forms.BooleanField(initial=False, required=False)
content = forms.CharField(
required=False,
widget=RedactorWidget,
)
def __init__(self, *args, **kwargs):
selected_pebble = None
if kwargs.get('selected_pebble'):
selected_pebble = kwargs.pop('selected_pebble')
self.pebbles = kwargs.pop('pebbles')
super(PebblePageForm, self).__init__(*args, **kwargs)
choices = [(pebble.id, pebble.title) for pebble in self.pebbles]
if choices and not selected_pebble:
selected_pebble = choices[0][0]
self.fields['pebble'] = forms.ChoiceField(
choices=choices,
initial=selected_pebble,
widget=forms.Select(attrs={'class':'form-control',}),
)
def clean_slug(self):
slug = self.cleaned_data.get('slug')
if not slug or slug == '':
slug = slugify(self.cleaned_data['title'])
return slug
def clean(self):
cleaned_data = self.cleaned_data
slug = cleaned_data.get('slug')
pebble = Pebble.objects.get(id=cleaned_data.get('pebble'))
if slug != self.initial.get('slug') and PebblePage.objects.filter(pebble=pebble, slug=slug).exists():
raise forms.ValidationError("Slug matches an existing page")
return cleaned_data
class PebbleSettingsForm(forms.Form):
site_title = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
facebook_page = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
twitter_page = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
youtube_channel = forms.CharField(
max_length=100,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
tagline = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
show_rss = forms.BooleanField()
copyright = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
feed_description = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
feed_title = forms.CharField(
max_length=140,
required=False,
widget=forms.TextInput(attrs={'class':'form-control',}),
)
| mit | 3,485,214,062,149,560,000 | 29.130841 | 109 | 0.611042 | false |
philroche/Django-tinymce-filebrowser | mce_filebrowser/models.py | 1 | 1228 | import datetime
from django.db import models
from django.utils.translation import ugettext as _
from mce_filebrowser.conf import LOCAL_MCE_FILEBROWSER_UPLOADDIR,LOCAL_MCE_FILEBROWSER_PERUSER
def content_file_name(instance, filename):
if LOCAL_MCE_FILEBROWSER_PERUSER == True:
return "%s/%s/%s/%s" %(LOCAL_MCE_FILEBROWSER_UPLOADDIR,'user-%s' % str(instance.user_id), datetime.datetime.now().strftime("%Y/%m/%d"), filename)
else:
return "%s/%s/%s" %(LOCAL_MCE_FILEBROWSER_UPLOADDIR, datetime.datetime.now().strftime("%Y/%m/%d"), filename)
class FileBrowserFile(models.Model):
""" Uploaded file model """
FILE_TYPES = (
('img', _('Image')),
('doc', _('Document')),
)
file_type = models.CharField(max_length=3, choices=FILE_TYPES)
uploaded_file = models.FileField(
upload_to=content_file_name,
verbose_name = _('File / Image'),
max_length=300,
)
create_date = models.DateTimeField(
auto_now_add=True, verbose_name=_('Create date')
)
user_id = models.IntegerField(null=True, blank=True, verbose_name=_('Who does this file belong to?'))
def __unicode__(self):
return u'%s' % self.uploaded_file.name
| mit | -6,621,422,880,594,318,000 | 33.111111 | 153 | 0.645765 | false |
wrightjb/bolt-planar | setup.py | 1 | 2408 | # setup.py for planar
#
# $Id$
import os
import sys
import shutil
from distutils.core import setup, Extension
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
if sys.version_info >= (3, 0):
raise ImportError("build_py_2to3 not found in distutils - it is required for Python 3.x")
from distutils.command.build_py import build_py
suffix = ""
else:
suffix = "-py3k"
srcdir = os.path.dirname(__file__)
def read(fname):
return open(os.path.join(srcdir, fname)).read()
include_dirs = ['include']
extra_compile_args = []
if 'SETUP_PY_CFLAGS' in os.environ:
# SETUP_PY_CFLAGS allows you to pass in CFLAGS
    # in a distutils-friendly way. Using CFLAGS directly
# causes linking to fail for some python versions
extra_compile_args.append(os.environ['SETUP_PY_CFLAGS'])
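# Example invocation (illustrative, not from the original file):
#   SETUP_PY_CFLAGS="-O2" python setup.py build_ext --inplace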
setup(
name='planar',
version='0.4', # *** REMEMBER TO UPDATE __init__.py ***
description='2D planar geometry library for Python.',
long_description=read('README.txt'),
provides=['planar'],
author='Casey Duncan',
author_email='[email protected]',
url='http://bitbucket.org/caseman/planar/',
license='BSD',
classifiers = [
'Development Status :: 3 - Alpha',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
],
platforms = 'any',
package_dir={'planar': 'lib/planar',
'planar.test': 'test'},
packages=['planar', 'planar.test'],
ext_modules=[
Extension('planar.c',
['lib/planar/cmodule.c',
'lib/planar/cvector.c',
'lib/planar/ctransform.c',
'lib/planar/cline.c',
'lib/planar/cbox.c',
'lib/planar/cpolygon.c',
],
include_dirs=include_dirs,
#library_dirs=library_dirs,
#libraries=libraries,
#extra_link_args=extra_link_args,
extra_compile_args=extra_compile_args,
#define_macros=macros,
),
],
cmdclass = {'build_py': build_py},
)
| bsd-3-clause | -2,776,766,464,232,461,000 | 28.365854 | 97 | 0.634551 | false |
teddy-michel/Mimir | games/forms.py | 1 | 2992 | from django.forms import ModelForm, Textarea, HiddenInput, IntegerField, CharField, Select
from django.utils.translation import ugettext as _
from base.models import Tag
from .models import Game, Saga, SagaGame, GameAttribute, GameLink, GameTag, GameUser
class GameForm(ModelForm):
class Meta:
model = Game
fields = ["title", "title_vo", "year", "infos", "image"]
widgets = {
"infos": Textarea(attrs={"rows": 4}),
}
class GameAttributeForm(ModelForm):
class Meta:
model = GameAttribute
fields = ["name", "value"]
class GameLinkForm(ModelForm):
class Meta:
model = GameLink
fields = ["name", "uri", "lang"]
class GameTagForm(ModelForm):
tag_name = CharField(label=_("Tag name"), max_length=100)
def __init__(self, *args, **kwargs):
super(ModelForm, self).__init__(*args, **kwargs)
self.fields["tag"].required = False
if hasattr(self.instance, "tag"):
self.initial["tag_name"] = self.instance.tag.name
else:
self.initial["tag_name"] = ""
def save(self, commit=True):
name = self.cleaned_data.get("tag_name").strip()
if self.initial["tag_name"] != name:
tag = Tag.objects.get_or_create(name=name)[0]
self.instance.tag = tag
return super(ModelForm, self).save(commit=commit)
class Meta:
model = GameTag
fields = ["tag", "tag_name", "infos"]
widgets = {
"tag": HiddenInput(),
}
class SagaForm(ModelForm):
class Meta:
model = Saga
fields = ["title", "title_vo", "infos"]
widgets = {
"infos": Textarea(attrs={"rows": 4}),
}
class SagaGameForm(ModelForm):
game_id = IntegerField(label=_("Title"), widget=Select(attrs={"class": "select_game"}), required=False)
game_title = CharField(widget=HiddenInput(), max_length=150, required=False)
def __init__(self, *args, **kwargs):
super(ModelForm, self).__init__(*args, **kwargs)
if hasattr(self.instance, "game"):
self.initial["game_id"] = self.instance.game.id
self.initial["game_title"] = self.instance.game.title
if self.instance.game.year:
self.initial["game_title"] += " (%s)" % self.instance.game.year
else:
self.initial["game_id"] = None
self.initial["game_title"] = ""
def save(self, commit=True):
if self.instance.game is None and self.cleaned_data.get("game_id"):
self.instance.game = Game.objects.get(id=int(self.cleaned_data.get("game_id")))
return super(ModelForm, self).save(commit=commit)
class Meta:
model = SagaGame
fields = ["game_id", "game_title", "game"]
widgets = {
"game": HiddenInput(),
}
class GameUserForm(ModelForm):
class Meta:
model = GameUser
fields = ["bought", "played", "finished"]
| gpl-3.0 | -415,004,628,209,892,350 | 28.048544 | 107 | 0.580548 | false |
ganeshgore/myremolab | server/launch/sample/main_machine/main_instance/laboratory/server_config.py | 1 | 7958 | ##################################
# Laboratory Server configuration #
##################################
laboratory_assigned_experiments = {
'exp1:ud-fpga@FPGA experiments':
{
'coord_address': 'experiment_fpga:main_instance@main_machine',
'checkers': ()
},
'exp1:ud-demo-fpga@FPGA experiments':
{
'coord_address': 'experiment_demo_fpga:main_instance@main_machine',
'checkers': ()
},
'exp1:ud-pld@PLD experiments':
{
'coord_address': 'experiment_pld:main_instance@main_machine',
'checkers': ()
},
'exp1:ud-demo-pld@PLD experiments':
{
'coord_address': 'experiment_demo_pld:main_instance@main_machine',
'checkers': (),
},
'exp1:ud-demo-xilinx@Xilinx experiments':
{
'coord_address': 'experiment_demo_xilinx1:main_instance@main_machine',
'checkers': (),
},
'exp2:ud-demo-xilinx@Xilinx experiments':
{
'coord_address': 'experiment_demo_xilinx2:main_instance@main_machine',
'checkers': (),
},
'exp1:ud-gpib@GPIB experiments':
{
'coord_address': 'experiment_gpib:main_instance@main_machine',
'checkers': (),
},
'exp1:ud-dummy@Dummy experiments':
{
'coord_address': 'experiment_dummy:main_instance@main_machine',
'checkers': ()
},
'exp1:ud-dummy-batch@Dummy experiments':
{
'coord_address': 'experiment_dummy_batch:main_instance@main_machine',
'checkers': ()
},
'exp2:ud-dummy@Dummy experiments':
{
'coord_address': 'experiment_dummy:main_instance@main_machine',
'checkers': ()
},
'exp1:ud-logic@PIC experiments':
{
'coord_address': 'experiment_logic:main_instance@main_machine',
'checkers': ()
},
'exp1:flashdummy@Dummy experiments':
{
'coord_address': 'experiment_flashdummy:main_instance@main_machine',
'checkers': ()
},
'exp1:javadummy@Dummy experiments':
{
'coord_address': 'experiment_javadummy:main_instance@main_machine',
'checkers': ()
},
'exp1:jsdummy@Dummy experiments':
{
'coord_address': 'experiment_jsdummy:main_instance@main_machine',
'checkers': ()
},
'exp1:jsfpga@FPGA experiments':
{
'coord_address': 'experiment_jsfpga:main_instance@main_machine',
'checkers': ()
},
'exp1:visirtest@Dummy experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp2:visirtest@Dummy experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp3:visirtest@Dummy experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp4:visirtest@Dummy experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp5:visirtest@Dummy experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp1:visir@Visir experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp2:visir@Visir experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp3:visir@Visir experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp4:visir@Visir experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp5:visir@Visir experiments':
{
'coord_address': 'experiment_testvisir:main_instance@main_machine',
'checkers': ()
},
'exp1:vm@Dummy experiments':
{
'coord_address': 'experiment_vm:main_instance@main_machine',
'checkers': ()
},
'exp1:vm-win@Dummy experiments':
{
'coord_address': 'experiment_vm_win:main_instance@main_machine',
'checkers': ()
},
'exp1:submarine@Submarine experiments':
{
'coord_address': 'experiment_submarine:main_instance@main_machine',
'checkers': ()
},
'exp1:aquarium@Aquatic experiments':
{
'coord_address': 'experiment_aquarium:main_instance@main_machine',
'checkers': ()
},
'exp1:unr-physics@Physics experiments':
{
'coord_address': 'experiment_unr:main_instance@main_machine',
'checkers': ()
},
'exp1:robot-standard@Robot experiments':
{
'coord_address': 'experiment_robot_standard:main_instance@main_machine',
'checkers': ()
},
'exp1:robot-movement@Robot experiments':
{
'coord_address': 'experiment_robot_movement:main_instance@main_machine',
'checkers': ()
},
'exp1:robot-proglist@Robot experiments':
{
'coord_address': 'experiment_robot_proglist:main_instance@main_machine',
'checkers': ()
},
'exp1:robotarm@Robot experiments':
{
'coord_address': 'experiment_robotarm:main_instance@main_machine',
'checkers': ()
},
'exp1:blink-led@LabVIEW experiments':
{
'coord_address': 'experiment_blinkled:main_instance@main_machine',
'checkers': ()
},
'exp1:ud-pic18@PIC experiments':
{
'coord_address' : 'experiment_pic18:main_instance@main_machine',
'checkers': ()
},
'exp1:binary@Games':
{
'coord_address' : 'experiment_binary:main_instance@main_machine',
'checkers': ()
},
'exp1:control-app@Control experiments':
{
'coord_address' : 'experiment_controlapp:main_instance@main_machine',
'checkers': (),
'manages_polling' : True,
},
'exp1:incubator@Farm experiments':
{
'coord_address' : 'experiment_incubator:main_instance@main_machine',
'checkers': ()
},
'exp1:robot-maze@Robot experiments':
{
'coord_address' : 'experiment_robot_maze:main_instance@main_machine',
'checkers': ()
},
'exp1:http@HTTP experiments':
{
'coord_address' : 'experiment_http:main_instance@main_machine',
'checkers': (),
'manages_polling' : True,
},
}
| bsd-2-clause | 2,023,476,635,068,262,100 | 36.186916 | 89 | 0.468082 | false |
acutesoftware/AIKIF | aikif/lib/cls_goal_time.py | 1 | 1843 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# cls_goal_time.py
from aikif.lib.cls_goal import Goal
class GoalTime(Goal):
"""
    Goals around time, e.g. maximise the use of an object or minimise the time taken by a task.
"""
def __init__(self, maximise=True, current_val=0, target_val=0):
"""
        Set maximise=True for the class to find the maximum time (usage), or
        set it to False to minimise the amount of time (e.g. reduce task time).
"""
self.current_val = current_val
self.target_val = target_val
self.maximise = maximise
self.strategy = [
{'name':'Travel_walk', 'speed':1, 'max_km_day':30, 'dest_flexibility':100, 'money_cost':0, 'environ_cost':0},
{'name':'Travel_bike', 'speed':5, 'max_km_day':200, 'dest_flexibility':50, 'money_cost':0, 'environ_cost':0},
{'name':'Travel_car', 'speed':60, 'max_km_day':1500, 'dest_flexibility':30, 'money_cost':50, 'environ_cost':50},
{'name':'Travel_bus', 'speed':60, 'max_km_day':1500, 'dest_flexibility':20, 'money_cost':10, 'environ_cost':15}
]
def check_for_success(self):
if self.maximise:
if self.current_val > self.target_val:
return False
else:
return True
else:
if self.current_val <= self.target_val:
return False
else:
return True
def run_plan(self, strategy):
"""
executes a plan by running the passed strategy
and then updates the local results
"""
print ("TODO running strategy : " + strategy['name'] )
def find_best_plan(self):
"""
try each strategy with different amounts
"""
for strat in self.strategy:
self.run_plan(strat)
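# Added usage sketch (not part of the original module):
#   goal = GoalTime(maximise=False, current_val=45, target_val=30)
#   goal.find_best_plan()            # currently just runs each strategy stub
#   success = goal.check_for_success()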
| gpl-3.0 | -5,118,131,896,329,419,000 | 33.773585 | 124 | 0.546934 | false |
domob1812/huntercore | test/functional/auxpow_mining.py | 1 | 6838 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 Daniel Kraft
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test the merge-mining RPC interface:
# getauxblock, createauxblock, submitauxblock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework import auxpow
class AuxpowMiningTest (BitcoinTestFramework):
def set_test_params (self):
self.num_nodes = 2
def add_options (self, parser):
parser.add_option ("--segwit", dest="segwit", default=False,
action="store_true",
help="Test behaviour with SegWit active")
def run_test (self):
# Enable mock time to be out of IBD.
self.enable_mocktime ()
# Activate segwit if requested.
if self.options.segwit:
self.nodes[0].generate (500)
self.sync_all ()
# Test with getauxblock and createauxblock/submitauxblock.
self.test_getauxblock ()
self.test_create_submit_auxblock ()
def test_common (self, create, submit):
"""
Common test code that is shared between the tests for getauxblock and the
createauxblock / submitauxblock method pair.
"""
# Verify data that can be found in another way.
auxblock = create ()
assert_equal (auxblock['chainid'], 6)
assert_equal (auxblock['algo'], 0)
assert_equal (auxblock['height'], self.nodes[0].getblockcount () + 1)
assert_equal (auxblock['previousblockhash'],
self.nodes[0].getblockhash (auxblock['height'] - 1))
# Calling again should give the same block.
# Try using the (default) algo parameter.
auxblock2 = create (0)
assert_equal (auxblock2, auxblock)
# If we receive a new block, the old hash will be replaced.
self.sync_all ()
self.nodes[1].generate (1)
self.sync_all ()
auxblock2 = create ()
assert auxblock['hash'] != auxblock2['hash']
assert_raises_rpc_error (-8, 'block hash unknown', submit,
auxblock['hash'], "x")
# Invalid format for auxpow.
assert_raises_rpc_error (-1, None, submit,
auxblock2['hash'], "x")
# Invalidate the block again, send a transaction and query for the
# auxblock to solve that contains the transaction.
self.nodes[0].generate (1)
addr = self.nodes[1].getnewaddress ()
txid = self.nodes[0].sendtoaddress (addr, 1)
self.sync_all ()
assert_equal (self.nodes[1].getrawmempool (), [txid])
auxblock = create ()
target = auxpow.reverseHex (auxblock['_target'])
# Compute invalid auxpow.
apow = auxpow.computeAuxpow (auxblock['hash'], target, False)
res = submit (auxblock['hash'], apow)
assert not res
# Compute and submit valid auxpow.
apow = auxpow.computeAuxpow (auxblock['hash'], target, True)
res = submit (auxblock['hash'], apow)
assert res
# Make sure that the block is indeed accepted.
self.sync_all ()
assert_equal (self.nodes[1].getrawmempool (), [])
height = self.nodes[1].getblockcount ()
assert_equal (height, auxblock['height'])
assert_equal (self.nodes[1].getblockhash (height), auxblock['hash'])
# Call getblock and verify the auxpow field.
data = self.nodes[1].getblock (auxblock['hash'])
assert 'auxpow' in data
auxJson = data['auxpow']
assert_equal (auxJson['index'], 0)
assert_equal (auxJson['chainindex'], 0)
assert_equal (auxJson['merklebranch'], [])
assert_equal (auxJson['chainmerklebranch'], [])
assert_equal (auxJson['parentblock'], apow[-160:])
# Also previous blocks should have 'auxpow', since all blocks (also
# those generated by "generate") are merge-mined.
oldHash = self.nodes[1].getblockhash (100)
data = self.nodes[1].getblock (oldHash)
assert 'auxpow' in data
# Check that it paid correctly to the first node.
t = self.nodes[0].listtransactions ("*", 1)
assert_equal (len (t), 1)
t = t[0]
assert_equal (t['category'], "immature")
assert_equal (t['blockhash'], auxblock['hash'])
assert t['generated']
assert_greater_than_or_equal (t['amount'], Decimal ("0.05"))
assert_equal (t['confirmations'], 1)
# Verify the coinbase script. Ensure that it includes the block height
# to make the coinbase tx unique. The expected block height is around
# 200, so that the serialisation of the CScriptNum ends in an extra 00.
# The vector has length 2, which makes up for 02XX00 as the serialised
# height. Check this. (With segwit, the height is different, so we skip
# this for simplicity.)
if not self.options.segwit:
blk = self.nodes[1].getblock (auxblock['hash'])
tx = self.nodes[1].getrawtransaction (blk['tx'][0], 1)
coinbase = tx['vin'][0]['coinbase']
assert_equal ("02%02x00" % auxblock['height'], coinbase[0 : 6])
def test_getauxblock (self):
"""
Test the getauxblock method.
"""
create = self.nodes[0].getauxblock
submit = self.nodes[0].getauxblock
self.test_common (create, submit)
# Ensure that the payout address is changed from one block to the next.
hash1 = auxpow.mineAuxpowBlockWithMethods (create, submit)
hash2 = auxpow.mineAuxpowBlockWithMethods (create, submit)
self.sync_all ()
addr1 = auxpow.getCoinbaseAddr (self.nodes[1], hash1)
addr2 = auxpow.getCoinbaseAddr (self.nodes[1], hash2)
assert addr1 != addr2
info = self.nodes[0].getaddressinfo (addr1)
assert info['ismine']
info = self.nodes[0].getaddressinfo (addr2)
assert info['ismine']
def test_create_submit_auxblock (self):
"""
Test the createauxblock / submitauxblock method pair.
"""
# Check for errors with wrong parameters.
assert_raises_rpc_error (-1, None, self.nodes[0].createauxblock)
assert_raises_rpc_error (-5, "Invalid coinbase payout address",
self.nodes[0].createauxblock,
"this_an_invalid_address")
# Fix a coinbase address and construct methods for it.
coinbaseAddr = self.nodes[0].getnewaddress ()
def create (*algo):
return self.nodes[0].createauxblock (coinbaseAddr, *algo)
submit = self.nodes[0].submitauxblock
# Run common tests.
self.test_common (create, submit)
# Ensure that the payout address is the one which we specify
hash1 = auxpow.mineAuxpowBlockWithMethods (create, submit)
hash2 = auxpow.mineAuxpowBlockWithMethods (create, submit)
self.sync_all ()
addr1 = auxpow.getCoinbaseAddr (self.nodes[1], hash1)
addr2 = auxpow.getCoinbaseAddr (self.nodes[1], hash2)
assert_equal (addr1, coinbaseAddr)
assert_equal (addr2, coinbaseAddr)
if __name__ == '__main__':
AuxpowMiningTest ().main ()
| mit | 2,870,474,276,787,400,000 | 35.962162 | 77 | 0.659842 | false |
algorhythms/LeetCode | 276 Paint Fence.py | 1 | 3181 | """
Premium Question
"""
__author__ = 'Daniel'
class Solution(object):
def numWays_oneliner(self, n, k):
return 0 if n < 1 else sum(reduce(lambda F, i: [(k-1)*(F[0]+F[1]), F[0]], xrange(1, n), [k, 0]))
def numWays(self, n, k):
"""
        You need to abstract the number of colors into a binary property: whether the last two posts share a color.
        Let F1[i] be the number of ways to paint A[:i] with the last two posts in different colors,
        and F2[i] the number of ways with the last two posts in the same color. Then
        F1[i] = (k-1)*(F1[i-1]+F2[i-1])
        F2[i] = F1[i-1]
        Space is optimized to O(1) since step i only depends on step i-1.
:type n: int
:type k: int
:rtype: int
"""
if n < 1:
return 0
num_diff = k
num_same = 0
for _ in xrange(1, n):
num_diff, num_same = (k-1)*(num_diff+num_same), num_diff
return num_diff+num_same
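        # Illustrative trace (added for clarity, not part of the original solution): n=3, k=2
        #   start:      num_diff=2, num_same=0
        #   after i=1:  num_diff=(2-1)*(2+0)=2, num_same=2
        #   after i=2:  num_diff=(2-1)*(2+2)=4, num_same=2  -> returns 6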
def numWays_MLE2(self, n, k):
"""
DP
Let F[i][j][l] be the number of ways of painting for A[:i] with A[i-1] as color j and A[i-2] as color l
:type n: int
:type k: int
:rtype: int
"""
if n < 1:
return 0
F = [[[0 for _ in xrange(k)] for _ in xrange(k)] for _ in xrange(2)]
EMPTY = 0
for j0 in xrange(k):
F[1][j0][EMPTY] = 1
for i in xrange(2, n+1):
for j0 in xrange(k):
for j1 in xrange(k):
F[i%2][j0][j1] = 0
for j0 in xrange(k):
for j1 in xrange(k):
for j2 in xrange(k):
if i == 2:
F[i%2][j0][j1] = F[(i-1)%2][j1][EMPTY]
elif j1 == j2 and j0 != j1:
F[i%2][j0][j1] += F[(i-1)%2][j1][j2]
elif j1 != j2:
F[i%2][j0][j1] += F[(i-1)%2][j1][j2]
ret = 0
for j0 in xrange(k):
for j1 in xrange(k):
ret += F[n%2][j0][j1]
return ret
def numWays_MLE(self, n, k):
"""
DP
let F[i][j][l] be the number of ways of painting for A[:i] with A[i-1] as color j and A[i-2] as color l
:type n: int
:type k: int
:rtype: int
"""
if n < 1:
return 0
F = [[[0 for _ in xrange(k)] for _ in xrange(k)] for _ in xrange(n+1)]
EMPTY = 0
for j0 in xrange(k):
F[1][j0][EMPTY] = 1
for i in xrange(2, n+1):
for j0 in xrange(k):
for j1 in xrange(k):
for j2 in xrange(k):
if i == 2:
F[i][j0][j1] = F[i-1][j1][EMPTY]
elif j1 == j2 and j0 != j1:
F[i][j0][j1] += F[i-1][j1][j2]
elif j1 != j2:
F[i][j0][j1] += F[i-1][j1][j2]
ret = 0
for j0 in xrange(k):
for j1 in xrange(k):
ret += F[n][j0][j1]
return ret
if __name__ == "__main__":
assert Solution().numWays(3, 2) == 6
| mit | 631,004,810,354,814,800 | 26.188034 | 111 | 0.408362 | false |
Jc2k/libcloudcore | libcloudcore/driver.py | 1 | 2268 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .error_parser import ErrorParser
from .validation import Validation
logger = logging.getLogger(__name__)
class Driver(ErrorParser, Validation):
def _get_params_for(self, target, shape, params):
result = {}
for member in shape.iter_members():
if member.target == target and member.name in params:
result[member.name] = params[member.name]
return result
def before_call(self, request, operation, **params):
request.scheme = operation.http['scheme']
request.host = operation.http['host']
request.port = operation.http['port']
request.uri = operation.http['uri'].lstrip("/").format(**params)
request.method = operation.http['method']
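        # Illustrative shape of operation.http assumed by the lookups above
        # (keys taken from this method, example values invented):
        #   {'scheme': 'https', 'host': 'api.example.com', 'port': 443,
        #    'uri': '/servers/{server_id}', 'method': 'GET'}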
request.query = self._get_params_for(
"query",
operation.input_shape,
params,
)
super(Driver, self).before_call(request, operation, **params)
logger.debug("{}: {}".format(request.method, request.uri))
logger.debug(request.body)
logger.debug(request.headers)
def after_call(self, operation, request, response):
logger.debug(response.status_code)
logger.debug(response.body)
result = {
'Metadata': {
'StatusCode': response.status_code
}
}
result.update(
super(Driver, self).after_call(operation, request, response)
)
return result
| apache-2.0 | 6,896,000,010,012,746,000 | 34.4375 | 74 | 0.659171 | false |
lailongwei/llbc | wrap/pyllbc/script/comm/Timer.py | 1 | 3690 | # -*- coding: utf-8 -*-
import time as _time
from datetime import datetime as _dt
from datetime import tzinfo as _tzinfo
from datetime import timedelta as _timedelta
import llbc
class _pyllbcGMT(_tzinfo):
"""llbc library GMT tzinfo class encapsulation"""
_delta = _timedelta(0)
def utcoffset(self, dt):
return self._delta
    def tzname(self, dt):
return "GMT+0"
def dst(self, dt):
return None
class _pyllbcLOCAL(_tzinfo):
"""llbc library LOCAL tzinfo class encapsulation"""
_delta = _timedelta(seconds=-_time.timezone)
_tzname = 'GMT +{}'.format(-_time.timezone / 3600) if _time.timezone < 0 else \
('GMT -{}'.format(_time.timezone / 3600) if _time.timezone > 0 else 'GMT +0')
def utcoffset(self, dt):
return self._delta
    def tzname(self, dt):
return self._tzname
def dst(self, dt):
return None
class pyllbcTimer(object):
"""llbc library timer class encapsulation"""
INVALID_TIMER_ID = 0
"""llbc library timer class encapsulation"""
def __init__(self, ontimeout, oncancel=None):
if not callable(ontimeout):
            raise TypeError("'ontimeout' obj must be callable")
if not callable(oncancel):
self.__c_obj = llbc.inl.NewPyTimer(self, ontimeout)
else:
self.__c_obj = llbc.inl.NewPyTimer(self, ontimeout, oncancel)
def __del__(self):
llbc.inl.DelPyTimer(self.__c_obj)
@property
def timerid(self):
return llbc.inl.PyTimerGetTimerId(self.__c_obj)
@property
def duetime(self):
return llbc.inl.PyTimerGetDueTime(self.__c_obj)
@property
def period(self):
return llbc.inl.PyTimerGetPeriod(self.__c_obj)
@property
def isscheduling(self):
return llbc.inl.PyTimerIsScheduling(self.__c_obj)
@property
def istimeouting(self):
return llbc.inl.PyTimerIsTimeouting(self.__c_obj)
@property
def iscancelling(self):
return llbc.inl.PyTimerIsCancelling(self.__c_obj)
@property
def ignored_dead_ref(self):
return llbc.inl.PyTimerIsIgnoredDeadRef(self.__c_obj)
@ignored_dead_ref.setter
def ignored_dead_ref(self, flag):
llbc.inl.PyTimerSetIgnoredDeadRef(self.__c_obj, flag)
def schedule(self, duetime, period=None):
"""Schedule timer"""
if period is None:
period = duetime
llbc.inl.PyTimerSchedule(self.__c_obj, duetime, period)
def schedule2(self, duetime, period, fmtstr='%Y-%m-%d %H:%M:%S'):
"""
        Schedule the timer. The duetime argument may be a datetime object, a str, or a numeric value:
        if duetime is a datetime object, it is used directly as the expire time.
        if duetime is a str, it is parsed into a datetime using fmtstr.
        if duetime is numeric, it is treated as a timestamp in seconds.
        If no tzinfo is specified, llbc automatically fills in the local tzinfo.
"""
if isinstance(duetime, unicode):
duetime = duetime.decode('utf-8')
if isinstance(duetime, str):
duetime = _dt.strptime(duetime, fmtstr)
if isinstance(duetime, _dt):
ts = _time.mktime(duetime.timetuple()) + duetime.microsecond / 1000000.0
else:
ts = duetime
now = _time.time()
if ts < now:
raise llbc.error('duetime[{}] < nowtime[{}], schedule timer failed'.format(duetime, _dt.fromtimestamp(now)))
self.schedule(int((ts - now) * 1000), int(period * 1000))
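        # Illustrative usage (assumes an 'on_timeout' callable defined elsewhere):
        #   timer = pyllbcTimer(on_timeout)
        #   timer.schedule2('2030-01-01 12:00:00', 5)  # expire at that local time, then every 5 seconds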
def cancel(self):
"""Cancel timer"""
llbc.inl.PyTimerCancel(self.__c_obj)
llbc.Timer = pyllbcTimer
| mit | -2,180,250,508,961,024,300 | 29.213115 | 120 | 0.620867 | false |
metapolator/mutatormathtools | python_modules/lib/python/ufoLib/test/test_GLIF1.py | 1 | 32659 | import unittest
from ufoLib.glifLib import GlifLibError, readGlyphFromString, writeGlyphToString
from ufoLib.test.testSupport import Glyph, stripText
# ----------
# Test Cases
# ----------
class TestGLIF1(unittest.TestCase):
def assertEqual(self, first, second, msg=None):
if isinstance(first, basestring):
first = stripText(first)
if isinstance(second, basestring):
second = stripText(second)
return super(TestGLIF1, self).assertEqual(first, second, msg=msg)
def pyToGLIF(self, py):
py = stripText(py)
glyph = Glyph()
exec py in {"glyph" : glyph, "pointPen" : glyph}
glif = writeGlyphToString(glyph.name, glyphObject=glyph, drawPointsFunc=glyph.drawPoints, formatVersion=1)
glif = "\n".join(glif.splitlines()[1:])
return glif
def glifToPy(self, glif):
glif = stripText(glif)
glif = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + glif
glyph = Glyph()
readGlyphFromString(glif, glyphObject=glyph, pointPen=glyph)
return glyph.py()
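	# The cases below round-trip both ways: pyToGLIF(py) should reproduce the
	# GLIF text and glifToPy(glif) the pointPen program, so "legal" cases
	# assert equality in both directions while "illegal" ones expect GlifLibError.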
def testTopElement(self):
# not glyph
glif = """
<notglyph name="a" format="1">
<outline>
</outline>
</notglyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testName(self):
# legal
glif = """
<glyph name="a" format="1">
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# empty
glif = """
<glyph name="" format="1">
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = ""
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
# not a string
py = """
glyph.name = 1
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
def testFormat(self):
# legal
glif = """
<glyph name="a" format="1">
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# wrong number
glif = """
<glyph name="a" format="-1">
<outline>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
# not an int
glif = """
<glyph name="a" format="A">
<outline>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testBogusGlyphStructure(self):
# unknown element
glif = """
<glyph name="a" format="1">
<unknown />
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
# content
glif = """
<glyph name="a" format="1">
Hello World.
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testAdvance(self):
# legal: width and height
glif = """
<glyph name="a" format="1">
<advance height="200" width="100"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.width = 100
glyph.height = 200
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: width and height floats
glif = """
<glyph name="a" format="1">
<advance height="200.1" width="100.1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.width = 100.1
glyph.height = 200.1
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: width
glif = """
<glyph name="a" format="1">
<advance width="100"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.width = 100
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: height
glif = """
<glyph name="a" format="1">
<advance height="200"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.height = 200
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# illegal: not a number
glif = """
<glyph name="a" format="1">
<advance width="a"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.width = "a"
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="1">
<advance height="a"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.height = "a"
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testUnicodes(self):
# legal
glif = """
<glyph name="a" format="1">
<unicode hex="0061"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.unicodes = [97]
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
glif = """
<glyph name="a" format="1">
<unicode hex="0062"/>
<unicode hex="0063"/>
<unicode hex="0061"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.unicodes = [98, 99, 97]
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# illegal
glif = """
<glyph name="a" format="1">
<unicode hex="1.1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "zzzzzz"
glyph.unicodes = ["1.1"]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testNote(self):
glif = """
<glyph name="a" format="1">
<note>
hello
</note>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.note = "hello"
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testLib(self):
glif = """
<glyph name="a" format="1">
<outline>
</outline>
<lib>
<dict>
<key>dict</key>
<dict>
<key>hello</key>
<string>world</string>
</dict>
<key>float</key>
<real>2.5</real>
<key>int</key>
<integer>1</integer>
<key>list</key>
<array>
<string>a</string>
<string>b</string>
<integer>1</integer>
<real>2.5</real>
</array>
<key>string</key>
<string>a</string>
</dict>
</lib>
</glyph>
"""
py = """
glyph.name = "a"
glyph.lib = {"dict" : {"hello" : "world"}, "float" : 2.5, "int" : 1, "list" : ["a", "b", 1, 2.5], "string" : "a"}
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testOutline(self):
# unknown element
glif = """
<glyph name="a" format="1">
<outline>
<unknown/>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
# content
glif = """
<glyph name="a" format="1">
<outline>
hello
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testComponent(self):
# legal
glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, 1, 4)])
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# no base
glif = """
<glyph name="a" format="1">
<outline>
<component xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
# bogus values in transformation
glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", ("a", 3, 6, 5, 1, 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, "a", 6, 5, 1, 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="a" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, "a", 5, 1, 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="a" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, "a", 1, 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="a" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, "a", 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="a"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, 1, "a")])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testContour(self):
# legal: one contour
glif = """
<glyph name="a" format="1">
<outline>
<contour>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: two contours
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="2" type="move"/>
<point x="10" y="20" type="line"/>
</contour>
<contour>
<point x="1" y="2" type="move"/>
<point x="10" y="20" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(10, 20)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(10, 20)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# unknown element
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<unknown/>
</contour>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointCoordinates(self):
# legal: int
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="-2" type="move"/>
<point x="0" y="0" type="line" name="this is here so that the contour isn't seen as an anchor"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"name" : "this is here so that the contour isn't seen as an anchor", "segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: float
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1.1" y="-2.2" type="move"/>
<point x="0" y="0" type="line" name="this is here so that the contour isn't seen as an anchor"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1.1, -2.2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"name" : "this is here so that the contour isn't seen as an anchor", "segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
		# illegal: x is not a number
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="a" y="2" type="move"/>
<point x="0" y="0" type="line" name="this is here so that the contour isn't seen as an anchor"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[("a", 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"name" : "this is here so that the contour isn't seen as an anchor", "segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
		# illegal: y is not a number
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="a" type="move"/>
<point x="0" y="0" type="line" name="this is here so that the contour isn't seen as an anchor"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, "a")], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"name" : "this is here so that the contour isn't seen as an anchor", "segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointTypeMove(self):
# legal
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="-2" type="move"/>
<point x="3" y="-4" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: smooth=True
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="-2" type="move" smooth="yes"/>
<point x="3" y="-4" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : True})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# illegal: not at start
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="3" y="-4" type="line"/>
<point x="1" y="-2" type="move"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointTypeLine(self):
# legal
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="-2" type="move"/>
<point x="3" y="-4" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: start of contour
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="-2" type="line"/>
<point x="3" y="-4" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: smooth=True
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="-2" type="move"/>
<point x="3" y="-4" type="line" smooth="yes"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : True})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeCurve(self):
# legal
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: start of contour
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="100" y="200" type="curve"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: smooth=True
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve" smooth="yes"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : True})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: no off-curves
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: 1 off-curve
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="50" y="100"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(50, 100)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# illegal: 3 off-curves
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="100"/>
<point x="35" y="125"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 100)], **{"smooth" : False})
pointPen.addPoint(*[(35, 125)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointQCurve(self):
# legal
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="qcurve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: start of contour
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="100" y="200" type="qcurve"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: smooth=True
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : True})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: no off-curves
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="100" y="200" type="qcurve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: 1 off-curve
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="50" y="100"/>
<point x="100" y="200" type="qcurve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(50, 100)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: 3 off-curves
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="100"/>
<point x="35" y="125"/>
<point x="65" y="200"/>
<point x="100" y="200" type="qcurve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 100)], **{"smooth" : False})
pointPen.addPoint(*[(35, 125)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testSpecialCaseQCurve(self):
# contour with no on curve
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0"/>
<point x="0" y="100"/>
<point x="100" y="100"/>
<point x="100" y="0"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"smooth" : False})
pointPen.addPoint(*[(0, 100)], **{"smooth" : False})
pointPen.addPoint(*[(100, 100)], **{"smooth" : False})
pointPen.addPoint(*[(100, 0)], **{"smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeOffCurve(self):
# legal
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# legal: start of contour
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
# before move
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="65"/>
<point x="0" y="0" type="move"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
# before line
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="65"/>
<point x="0" y="0" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
# smooth=True
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="0" y="65" smooth="yes"/>
<point x="0" y="0" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : True})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testAnchor(self):
# legal
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="2" type="move"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.anchors = [{"x" : 1, "y" : 2}]
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="2" type="move" name="test"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.anchors = [{"name" : "test", "x" : 1, "y" : 2}]
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testOpenContourLooseOffCurves(self):
# a piece of software was writing this kind of structure
glif = """
<glyph name="a" format="1">
<outline>
<contour>
<point x="1" y="2" type="move"/>
<point x="1" y="2"/>
<point x="1" y="2"/>
<point x="1" y="2" type="curve"/>
<point x="1" y="2"/>
</contour>
</outline>
</glyph>
"""
expectedPy = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultPy = self.glifToPy(glif)
self.assertEqual(resultPy, expectedPy)
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
if __name__ == "__main__":
from robofab.test.testSupport import runTests
runTests()
| apache-2.0 | 785,241,886,212,384,300 | 25.085463 | 145 | 0.584954 | false |
juanchopanza/NeuroM | neurom/morphmath.py | 1 | 12380 | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Mathematical and geometrical functions used to compute morphometrics'''
import math
from itertools import combinations
import numpy as np
from neurom.core.dataformat import COLS
def vector(p1, p2):
'''compute vector between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
3-vector from p1 - p2
'''
return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ])
def linear_interpolate(p1, p2, fraction):
'''Returns the point p satisfying: p1 + fraction * (p2 - p1)'''
return np.array((p1[0] + fraction * (p2[0] - p1[0]),
p1[1] + fraction * (p2[1] - p1[1]),
p1[2] + fraction * (p2[2] - p1[2])))
def interpolate_radius(r1, r2, fraction):
'''Calculate the radius that corresponds to a point P that lies at a fraction of the length
of a cut cone P1P2 where P1, P2 are the centers of the circles that bound the shape with radii
r1 and r2 respectively.
Args:
r1: float
Radius of the first node of the segment.
r2: float
Radius of the second node of the segment
fraction: float
The fraction at which the interpolated radius is calculated.
Returns: float
The interpolated radius.
Note: The fraction is assumed from point P1, not from point P2.
'''
def f(a, b, c):
''' Returns the length of the interpolated radius calculated
using similar triangles.
'''
return a + c * (b - a)
return f(r2, r1, 1. - fraction) if r1 > r2 else f(r1, r2, fraction)
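# Illustrative example for interpolate_radius (added, values assumed):
# interpolate_radius(1.0, 3.0, 0.5) -> 2.0, i.e. halfway along the cut cone
# the radius lies midway between the two end radii.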
def path_fraction_id_offset(points, fraction, relative_offset=False):
'''Find the segment which corresponds to the fraction
of the path length along the piecewise linear curve which
is constructed from the set of points.
Args:
points: an iterable of indexable objects with indices
        0, 1, 2 corresponding to 3D cartesian coordinates
fraction: path length fraction (0.0 <= fraction <= 1.0)
relative_offset: return absolute or relative segment distance
Returns:
(segment ID, segment offset) pair.
'''
if not (0. <= fraction <= 1.0):
raise ValueError("Invalid fraction: %.3f" % fraction)
pts = np.array(points)[:, COLS.XYZ]
lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1)
cum_lengths = np.cumsum(lengths)
offset = cum_lengths[-1] * fraction
seg_id = np.argmin(cum_lengths < offset)
if seg_id > 0:
offset -= cum_lengths[seg_id - 1]
if relative_offset:
offset /= lengths[seg_id]
return seg_id, offset
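# Illustrative example for path_fraction_id_offset (added, points assumed):
# for points (0, 0, 0), (1, 0, 0), (3, 0, 0) and fraction=0.75 the total path
# length is 3 and the target offset 2.25 falls on the second segment, so the
# result is (1, 1.25), or (1, 0.625) with relative_offset=True.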
def path_fraction_point(points, fraction):
'''Computes the point which corresponds to the fraction
of the path length along the piecewise linear curve which
is constructed from the set of points.
Args:
points: an iterable of indexable objects with indices
        0, 1, 2 corresponding to 3D cartesian coordinates
fraction: path length fraction (0 <= fraction <= 1)
Returns:
The 3D coordinates of the aforementioned point
'''
seg_id, offset = path_fraction_id_offset(points, fraction, relative_offset=True)
return linear_interpolate(points[seg_id], points[seg_id + 1], offset)
def scalar_projection(v1, v2):
'''compute the scalar projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
3-vector of the projection of point p onto the direction of v
'''
return np.dot(v1, v2) / np.linalg.norm(v2)
def vector_projection(v1, v2):
'''compute the vector projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
3-vector of the projection of point p onto the direction of v
'''
return scalar_projection(v1, v2) * v2 / np.linalg.norm(v2)
def dist_point_line(p, l1, l2):
'''compute the orthogonal distance between from the line that goes through
the points l1, l2 and the point p
Args:
p, l1, l2 : iterable
point
indices 0, 1, 2 corresponding to cartesian coordinates
'''
cross_prod = np.cross(l2 - l1, p - l1)
return np.linalg.norm(cross_prod) / np.linalg.norm(l2 - l1)
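# Illustrative example for dist_point_line (added, inputs assumed to be numpy
# arrays): with l1=(0, 0, 0), l2=(1, 0, 0) and p=(0, 1, 0) the orthogonal
# distance of p from the line is 1.0.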
def point_dist2(p1, p2):
'''compute the square of the euclidian distance between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
The square of the euclidian distance between the points.
'''
v = vector(p1, p2)
return np.dot(v, v)
def point_dist(p1, p2):
'''compute the euclidian distance between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
The euclidian distance between the points.
'''
return np.sqrt(point_dist2(p1, p2))
def angle_3points(p0, p1, p2):
''' compute the angle in radians between three 3D points
Calculated as the angle between p1-p0 and p2-p0.
Args:
p0, p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
Angle in radians between (p1-p0) and (p2-p0).
0.0 if p0==p1 or p0==p2.
'''
vec1 = vector(p1, p0)
vec2 = vector(p2, p0)
return math.atan2(np.linalg.norm(np.cross(vec1, vec2)),
np.dot(vec1, vec2))
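# Illustrative example for angle_3points (added, points assumed): for
# p0=(0, 0, 0), p1=(1, 0, 0), p2=(0, 1, 0) the vectors p1-p0 and p2-p0 lie
# along the x and y axes, so the returned angle is pi/2.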
def polygon_diameter(points):
    ''' Compute the maximum euclidian distance between any two points
in a list of points
'''
return max(point_dist(p0, p1) for (p0, p1) in combinations(points, 2))
def average_points_dist(p0, p_list):
"""
Computes the average distance between a list of points
and a given point p0.
"""
return np.mean(list(point_dist(p0, p1) for p1 in p_list))
def path_distance(points):
"""
Compute the path distance from given set of points
"""
vecs = np.diff(points, axis=0)[:, :3]
d2 = [np.dot(p, p) for p in vecs]
return np.sum(np.sqrt(d2))
def segment_length(seg):
'''Return the length of a segment.
Returns: Euclidian distance between centres of points in seg
'''
return point_dist(seg[0], seg[1])
def segment_length2(seg):
'''Return the square of the length of a segment.
Returns: Square of Euclidian distance between centres of points in seg
'''
return point_dist2(seg[0], seg[1])
def segment_radius(seg):
'''Return the mean radius of a segment
Returns: arithmetic mean of the radii of the points in seg
'''
return (seg[0][COLS.R] + seg[1][COLS.R]) / 2.
def segment_x_coordinate(seg):
'''Return the mean x coordinate of a segment
Returns: arithmetic mean of the x coordinates of the points in seg
'''
return (seg[0][COLS.X] + seg[1][COLS.X]) / 2.
def segment_y_coordinate(seg):
'''Return the mean y coordinate of a segment
Returns: arithmetic mean of the y coordinates of the points in seg
'''
return (seg[0][COLS.Y] + seg[1][COLS.Y]) / 2.
def segment_z_coordinate(seg):
'''Return the mean z coordinate of a segment
Returns: arithmetic mean of the z coordinates of the points in seg
'''
return (seg[0][COLS.Z] + seg[1][COLS.Z]) / 2.
def segment_radial_dist(seg, pos):
'''Return the radial distance of a tree segment to a given point
The radial distance is the euclidian distance between the mid-point of
the segment and the point in question.
Parameters:
seg: tree segment
        pos: origin to which distances are measured. It must have at least 3
components. The first 3 components are (x, y, z).
'''
return point_dist(pos, np.divide(np.add(seg[0], seg[1]), 2.0))
def segment_area(seg):
'''Compute the surface area of a segment.
Approximated as a conical frustum. Does not include the surface area
of the bounding circles.
'''
r0 = seg[0][COLS.R]
r1 = seg[1][COLS.R]
h2 = point_dist2(seg[0], seg[1])
return math.pi * (r0 + r1) * math.sqrt((r0 - r1) ** 2 + h2)
def segment_volume(seg):
'''Compute the volume of a segment.
Approximated as a conical frustum.
'''
r0 = seg[0][COLS.R]
r1 = seg[1][COLS.R]
h = point_dist(seg[0], seg[1])
return math.pi * h * ((r0 * r0) + (r0 * r1) + (r1 * r1)) / 3.0
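# Quick sanity check of the frustum formulas above (added, illustrative only):
# for a segment whose two points both have radius 1 and lie 2 apart, the
# formulas reduce to the cylinder values: lateral area 4*pi and volume 2*pi.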
def taper_rate(p0, p1):
'''Compute the taper rate between points p0 and p1
Args:
p0, p1: iterables with first 4 components containing (x, y, z, r)
Returns:
The taper rate, defined as the absolute value of the difference in
the diameters of p0 and p1 divided by the euclidian distance
between them.
'''
return 2 * abs(p0[COLS.R] - p1[COLS.R]) / point_dist(p0, p1)
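# Illustrative example for taper_rate (added, points assumed to be numpy
# arrays of the form (x, y, z, r)): for p0=(0, 0, 0, 2) and p1=(3, 0, 0, 1)
# the diameters differ by 2 over a distance of 3, giving a taper rate of 2/3.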
def segment_taper_rate(seg):
'''Compute the taper rate of a segment
Returns:
The taper rate, defined as the absolute value of the difference in
the diameters of the segment's two points divided by the euclidian
distance between them.
'''
return taper_rate(seg[0], seg[1])
def pca(points):
'''
Estimate the principal components of the covariance on the given point cloud
Input
A numpy array of points of the form ((x1,y1,z1), (x2, y2, z2)...)
    Output
Eigenvalues and respective eigenvectors
'''
return np.linalg.eig(np.cov(points.transpose()))
def sphere_area(r):
''' Compute the area of a sphere with radius r
'''
return 4. * math.pi * r ** 2
# Useful alias for path_distance
section_length = path_distance
def principal_direction_extent(points):
'''Calculate the extent of a set of 3D points.
The extent is defined as the maximum distance between
the projections on the principal directions of the covariance matrix
of the points.
Parameter:
points : a 2D numpy array of points
Returns:
extents : the extents for each of the eigenvectors of the cov matrix
eigs : eigenvalues of the covariance matrix
eigv : respective eigenvectors of the covariance matrix
'''
# center the points around 0.0
points = np.copy(points)
points -= np.mean(points, axis=0)
# principal components
_, eigv = pca(points)
extent = np.zeros(3)
for i in range(eigv.shape[1]):
# orthogonal projection onto the direction of the v component
scalar_projs = np.sort(np.array([np.dot(p, eigv[:, i]) for p in points]))
extent[i] = scalar_projs[-1]
if scalar_projs[0] < 0.:
            extent[i] -= scalar_projs[0]
return extent
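# Illustrative example for principal_direction_extent (added, points assumed):
# for points spread only along the x-axis from (-1, 0, 0) to (1, 0, 0), the
# extent along the principal direction is 2.0.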
| bsd-3-clause | -7,683,719,952,775,599,000 | 29.79602 | 98 | 0.656704 | false |
karolyi/forum-django | backend/migrate_to_django/markdownparser.py | 1 | 1130 | import html
from bs4.element import Tag
from forum.base.models import Comment
from html2text import html2text
def markdown_smilies(img_tag: Tag):
img_src = img_tag.get('src', '')
if img_src.startswith('/static/images/smiliereplace/'):
img_alt = img_tag.get('alt', '')
img_tag.replace_with(img_alt)
return
if img_src.startswith('/static/images/smilies/'):
img_tag.replace_with('[SMIL:%s]' % img_src[22:])
return
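    # Illustrative behaviour (example path assumed): an <img> whose src is
    # '/static/images/smilies/smile.gif' is replaced with the text
    # '[SMIL:/smile.gif]' -- the [22:] slice keeps the leading slash.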
def replace_images(content: Tag):
for img_tag in content.select('img'):
markdown_smilies(img_tag)
def parse_to_markdown(content: Tag, comment_item: Comment, md_property: str):
replace_images(content)
for embed_item in content.select('div.embedded-player'):
embed_item.replace_with(embed_item.md_url)
content_md_html = content.body.decode_contents()\
.replace('></source>', '/>')\
.replace('\r\n', '\n')
md_content = html2text(content_md_html, bodywidth=0)
# Convert 2 BRs to Ps
md_content = html.unescape(md_content).replace(' \n \n', '\n\n')
setattr(comment_item, md_property, md_content)
| mit | -5,486,757,108,375,557,000 | 28.736842 | 77 | 0.643363 | false |
nextgis-extra/tests | lib_gdal/gdrivers/elas.py | 1 | 2251 | #!/usr/bin/env python
###############################################################################
# $Id: elas.py 32163 2015-12-13 17:44:50Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test ELAS driver
# Author: Even Rouault, <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2009, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Test a dataset generated by Create()
def elas_1():
tst = gdaltest.GDALTest( 'ELAS', 'byte_elas.bin', 1, 4672 )
return tst.testOpen()
###############################################################################
# Test Create()
def elas_2():
tst = gdaltest.GDALTest( 'ELAS', 'byte_elas.bin', 1, 4672 )
return tst.testCreate()
gdaltest_list = [
elas_1,
elas_2 ]
if __name__ == '__main__':
gdaltest.setup_run( 'elas' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| gpl-2.0 | -445,934,128,373,582,000 | 34.171875 | 79 | 0.593958 | false |
leppa/home-assistant | homeassistant/components/google_assistant/http.py | 1 | 8196 | """Support for Google Actions Smart Home Control."""
import asyncio
from datetime import timedelta
import logging
from uuid import uuid4
from aiohttp import ClientError, ClientResponseError
from aiohttp.web import Request, Response
import jwt
# Typing imports
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util import dt as dt_util
from .const import (
CONF_API_KEY,
CONF_CLIENT_EMAIL,
CONF_ENTITY_CONFIG,
CONF_EXPOSE,
CONF_EXPOSE_BY_DEFAULT,
CONF_EXPOSED_DOMAINS,
CONF_PRIVATE_KEY,
CONF_REPORT_STATE,
CONF_SECURE_DEVICES_PIN,
CONF_SERVICE_ACCOUNT,
GOOGLE_ASSISTANT_API_ENDPOINT,
HOMEGRAPH_SCOPE,
HOMEGRAPH_TOKEN_URL,
REPORT_STATE_BASE_URL,
REQUEST_SYNC_BASE_URL,
)
from .helpers import AbstractConfig
from .smart_home import async_handle_message
_LOGGER = logging.getLogger(__name__)
def _get_homegraph_jwt(time, iss, key):
now = int(time.timestamp())
jwt_raw = {
"iss": iss,
"scope": HOMEGRAPH_SCOPE,
"aud": HOMEGRAPH_TOKEN_URL,
"iat": now,
"exp": now + 3600,
}
return jwt.encode(jwt_raw, key, algorithm="RS256").decode("utf-8")
async def _get_homegraph_token(hass, jwt_signed):
headers = {
"Authorization": "Bearer {}".format(jwt_signed),
"Content-Type": "application/x-www-form-urlencoded",
}
data = {
"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
"assertion": jwt_signed,
}
session = async_get_clientsession(hass)
async with session.post(HOMEGRAPH_TOKEN_URL, headers=headers, data=data) as res:
res.raise_for_status()
return await res.json()
class GoogleConfig(AbstractConfig):
"""Config for manual setup of Google."""
def __init__(self, hass, config):
"""Initialize the config."""
super().__init__(hass)
self._config = config
self._access_token = None
self._access_token_renew = None
@property
def enabled(self):
"""Return if Google is enabled."""
return True
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return self._config.get(CONF_SECURE_DEVICES_PIN)
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return self._config.get(CONF_REPORT_STATE)
def should_expose(self, state) -> bool:
"""Return if entity should be exposed."""
expose_by_default = self._config.get(CONF_EXPOSE_BY_DEFAULT)
exposed_domains = self._config.get(CONF_EXPOSED_DOMAINS)
if state.attributes.get("view") is not None:
# Ignore entities that are views
return False
if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
explicit_expose = self.entity_config.get(state.entity_id, {}).get(CONF_EXPOSE)
domain_exposed_by_default = (
expose_by_default and state.domain in exposed_domains
)
# Expose an entity if the entity's domain is exposed by default and
# the configuration doesn't explicitly exclude it from being
# exposed, or if the entity is explicitly exposed
is_default_exposed = domain_exposed_by_default and explicit_expose is not False
return is_default_exposed or explicit_expose
def should_2fa(self, state):
"""If an entity should have 2FA checked."""
return True
async def _async_request_sync_devices(self, agent_user_id: str):
if CONF_API_KEY in self._config:
await self.async_call_homegraph_api_key(
REQUEST_SYNC_BASE_URL, {"agentUserId": agent_user_id}
)
elif CONF_SERVICE_ACCOUNT in self._config:
await self.async_call_homegraph_api(
REQUEST_SYNC_BASE_URL, {"agentUserId": agent_user_id}
)
else:
_LOGGER.error("No configuration for request_sync available")
async def _async_update_token(self, force=False):
if CONF_SERVICE_ACCOUNT not in self._config:
_LOGGER.error("Trying to get homegraph api token without service account")
return
now = dt_util.utcnow()
if not self._access_token or now > self._access_token_renew or force:
token = await _get_homegraph_token(
self.hass,
_get_homegraph_jwt(
now,
self._config[CONF_SERVICE_ACCOUNT][CONF_CLIENT_EMAIL],
self._config[CONF_SERVICE_ACCOUNT][CONF_PRIVATE_KEY],
),
)
self._access_token = token["access_token"]
self._access_token_renew = now + timedelta(seconds=token["expires_in"])
async def async_call_homegraph_api_key(self, url, data):
"""Call a homegraph api with api key authentication."""
websession = async_get_clientsession(self.hass)
try:
res = await websession.post(
url, params={"key": self._config.get(CONF_API_KEY)}, json=data
)
_LOGGER.debug(
"Response on %s with data %s was %s", url, data, await res.text()
)
res.raise_for_status()
return res.status
except ClientResponseError as error:
_LOGGER.error("Request for %s failed: %d", url, error.status)
return error.status
except (asyncio.TimeoutError, ClientError):
_LOGGER.error("Could not contact %s", url)
return 500
async def async_call_homegraph_api(self, url, data):
"""Call a homegraph api with authenticaiton."""
session = async_get_clientsession(self.hass)
async def _call():
headers = {
"Authorization": "Bearer {}".format(self._access_token),
"X-GFE-SSL": "yes",
}
async with session.post(url, headers=headers, json=data) as res:
_LOGGER.debug(
"Response on %s with data %s was %s", url, data, await res.text()
)
res.raise_for_status()
return res.status
try:
await self._async_update_token()
try:
return await _call()
except ClientResponseError as error:
if error.status == 401:
_LOGGER.warning(
"Request for %s unauthorized, renewing token and retrying", url
)
await self._async_update_token(True)
return await _call()
raise
except ClientResponseError as error:
_LOGGER.error("Request for %s failed: %d", url, error.status)
return error.status
except (asyncio.TimeoutError, ClientError):
_LOGGER.error("Could not contact %s", url)
return 500
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
data = {
"requestId": uuid4().hex,
"agentUserId": agent_user_id,
"payload": message,
}
await self.async_call_homegraph_api(REPORT_STATE_BASE_URL, data)
class GoogleAssistantView(HomeAssistantView):
"""Handle Google Assistant requests."""
url = GOOGLE_ASSISTANT_API_ENDPOINT
name = "api:google_assistant"
requires_auth = True
def __init__(self, config):
"""Initialize the Google Assistant request handler."""
self.config = config
async def post(self, request: Request) -> Response:
"""Handle Google Assistant requests."""
message: dict = await request.json()
result = await async_handle_message(
request.app["hass"], self.config, request["hass_user"].id, message
)
return self.json(result)
| apache-2.0 | 7,764,571,037,345,269,000 | 33.292887 | 87 | 0.599683 | false |
roac-monitoring/roac-agent | roac/logs.py | 1 | 1388 | # vim: set fileencoding=utf-8 :
from __future__ import absolute_import
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
def log_to_stderr(logger=None, level=logging.DEBUG):
    """Configures a logger to log to stderr.
    logger: Logger to configure. Pass None to use the root logger.
    Attaches a stderr handler to the chosen logger with a formatter that
    prints the date, log level and logger name.
"""
if logger is None:
logger = logging.getLogger()
else:
logger = logging.getLogger(logger)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(
'%(asctime)s [%(levelname)s|%(name)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def setup_logging(app):
"""Setup the python logging system according to whether the given app
is in debug mode
"""
if app.debug:
# Configure the root logger to output on stderr
log_to_stderr()
else:
# Configure the package logger to use NullHandler and avoid errors.
# The application should set up a proper handler if it wants logging
# in production.
pkg_logger = logging.getLogger(__package__)
handler = NullHandler()
pkg_logger.addHandler(handler)
| bsd-3-clause | -5,082,369,550,257,231,000 | 27.916667 | 76 | 0.669308 | false |
endlessm/chromium-browser | third_party/depot_tools/fetch_configs/config_util.py | 2 | 1638 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module holds utilities which make writing configs easier."""
from __future__ import print_function
import json
class Config(object):
"""Base class for all configs.
Provides methods that are expected to be overridden by child classes. Also
provides an command-line parsing method that converts the unified command-line
interface used in depot_tools to the unified python interface defined here."""
@staticmethod
def fetch_spec(_props):
"""Returns instructions to check out the project, conditioned on |props|."""
raise NotImplementedError
@staticmethod
def expected_root(_props):
"""Returns the directory into which the checkout will be performed."""
raise NotImplementedError
def handle_args(self, argv):
"""Passes the command-line arguments through to the appropriate method."""
methods = {'fetch': self.fetch_spec,
'root': self.expected_root}
if len(argv) <= 1 or argv[1] not in methods:
      print('Must specify a fetch/root action')
return 1
def looks_like_arg(arg):
return arg.startswith('--') and arg.count('=') == 1
bad_parms = [x for x in argv[2:] if not looks_like_arg(x)]
if bad_parms:
print('Got bad arguments %s' % bad_parms)
return 1
method = methods[argv[1]]
props = dict(x.split('=', 1) for x in (y.lstrip('-') for y in argv[2:]))
self.output(method(props))
@staticmethod
def output(data):
print(json.dumps(data))
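# A sketch (not part of depot_tools) of how a fetch config subclasses Config;
# the "Sample" name and URL below are invented purely for illustration.
#
#   class Sample(config_util.Config):
#     @staticmethod
#     def fetch_spec(_props):
#       return {
#           'type': 'gclient_git',
#           'gclient_git_spec': {
#               'solutions': [{'name': 'src',
#                              'url': 'https://example.org/sample.git'}],
#           },
#       }
#     @staticmethod
#     def expected_root(_props):
#       return 'src'
#
#   def main(argv=None):
#     return Sample().handle_args(argv)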
| bsd-3-clause | -3,427,243,475,538,356,700 | 30.5 | 80 | 0.681929 | false |
ivmech/iviny-scope | lib/xlsxwriter/test/comparison/test_chart_name04.py | 1 | 2519 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_font04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [43944960, 45705472]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_title({
'name': ['Sheet1', 0, 0],
'name_font': {'bold': 0, 'italic': 1},
})
chart.set_x_axis({
'name': ['Sheet1', 1, 0],
'name_font': {'bold': 0, 'italic': 1},
})
chart.set_y_axis({
'name': ['Sheet1', 2, 0],
'name_font': {'bold': 1, 'italic': 1},
})
worksheet.insert_chart('E9', chart)
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -6,459,244,008,668,592,000 | 25.515789 | 79 | 0.472807 | false |
TacticalGoat/reddit | DelayBotT/delaybotT.py | 1 | 4260 | #/u/GoldenSights
import praw
import time
import datetime
import sqlite3
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter Bot"
SUBREDDIT = "GoldTesting"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
MAXPOSTS = 30
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 20
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
TSTRING = "[request]"
#This is the part of the title that you want to look for
DELAY = 172800
#This is the time limit between a user's posts, IN SECONDS. 1h = 3600 || 12h = 43200 || 24h = 86400 || 144h = 518400
'''All done!'''
WAITS = str(WAIT)
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS users(name TEXT, lastpost TEXT)')
print('Loaded Users')
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT)')
print('Loaded Oldposts')
sql.commit()
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def getTime(as_unix):
    timeNow = datetime.datetime.now(datetime.timezone.utc)
    timeUnix = timeNow.timestamp()
    if not as_unix:
        return timeNow
    else:
        return timeUnix
def scan():
print('Scanning ' + SUBREDDIT)
subreddit = r.get_subreddit(SUBREDDIT)
posts = subreddit.get_new(limit=MAXPOSTS)
for post in posts:
try:
pauthor = post.author.name
except Exception:
pauthor = '[deleted]'
pid = post.id
plink = post.short_link
ptime = post.created_utc
ptitle = post.title.lower()
if TSTRING.lower() in ptitle:
cur.execute('SELECT * FROM oldposts WHERE id=?', [pid])
if not cur.fetchone():
cur.execute('SELECT * FROM users WHERE name=?', [pauthor])
if not cur.fetchone():
print('Found new user: ' + pauthor)
cur.execute('INSERT INTO users VALUES(?, ?)', (pauthor, pid))
r.send_message(pauthor, 'Welcome to /r/pkmntcgtrades!','Dear ' + pauthor + ',\n\n Our bot has determined that this is your first time posting in /r/pkmntcgtrades. Please take the time to read [the guidelines](http://www.reddit.com/r/pkmntcgtrades/wiki/guidelines) to understand how the subreddit works.\n\nIf you have any questions, feel free to [message the moderators.](http://www.reddit.com/message/compose?to=%2Fr%2Fpkmntcgtrades) Thanks, and happy trading!', captcha=None)
sql.commit()
print('\t' + pauthor + ' has been added to the database.')
time.sleep(5)
else:
cur.execute('SELECT * FROM users WHERE name=?', [pauthor])
fetch = cur.fetchone()
print('Found post by known user: ' + pauthor)
previousid = fetch[1]
previous = r.get_info(thing_id='t3_'+previousid)
previoustime = previous.created_utc
if ptime > previoustime:
curtime = getTime(True)
difference = curtime - previoustime
if difference >= DELAY:
print('\tPost complies with timelimit guidelines. Permitting')
cur.execute('DELETE FROM users WHERE name=?', [pauthor])
cur.execute('INSERT INTO users VALUES(?, ?)', (pauthor, pid))
sql.commit()
print('\t' + pauthor + "'s database info has been reset.")
else:
differences = '%.0f' % (DELAY - difference)
print('\tPost does not comply with timelimit guidelines. Author must wait ' + differences)
print('\t' + pauthor + "'s database info remains unchanged")
response = post.add_comment('You are posting here too frequently, so your post has been deleted. You may post again in ' + str(datetime.timedelta(seconds=float(differences))))
response.distinguish()
post.remove(spam=False)
time.sleep(5)
cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
sql.commit()
while True:
try:
scan()
except Exception as e:
print('An error has occured:', e)
print('Running again in ' + WAITS + ' seconds.\n')
time.sleep(WAIT)
| mit | -3,111,328,070,677,242,400 | 35.101695 | 482 | 0.68662 | false |
wasade/qiime | tests/test_make_otu_table.py | 1 | 4746 | #!/usr/bin/env python
# file test_make_otu_table
__author__ = "Rob Knight"
__copyright__ = "Copyright 2011, The QIIME Project" # consider project name
__credits__ = ["Rob Knight", "Justin Kuczynski", "Adam Robbins-Pianka"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
import json
from unittest import TestCase, main
from StringIO import StringIO
from biom.table import Table
from biom.parse import parse_biom_table
import numpy as np
from qiime.make_otu_table import (libs_from_seqids, seqids_from_otu_to_seqid,
make_otu_table)
from qiime.parse import parse_mapping_file, mapping_file_to_dict
class TopLevelTests(TestCase):
"""Tests of top-level functions"""
def assertEqualOtuTable(self, obs, exp):
""" """
obs = json.loads(obs)
exp = json.loads(exp)
for e in ['generated_by', 'date']:
del obs[e]
del exp[e]
self.assertEqual(obs, exp)
def test_libs_from_seqids(self):
"""libs_from_seqids should identify correct libs"""
seqids = ['ABC_001', 'DEF_002', 'ABC_003', 'GHI_JKL_001']
self.assertEqual(libs_from_seqids(seqids),
set(['ABC', 'DEF', 'GHI_JKL']))
def test_seqids_from_otu_to_seqid(self):
"""seqids_from_otu_to_seqid should return right seqids"""
otu_to_seqid = {'0': ['ABC_0', 'DEF_1'], 'x': ['GHI_2']}
self.assertEqual(seqids_from_otu_to_seqid(otu_to_seqid),
set(['ABC_0', 'DEF_1', 'GHI_2']))
def test_make_otu_table_no_taxonomy(self):
"""make_otu_table should work without tax (new-style OTU table)"""
otu_map_lines = """0 ABC_0 DEF_1
1 ABC_1
x GHI_2 GHI_3 GHI_77
z DEF_3 XYZ_1""".split('\n')
obs = make_otu_table(otu_map_lines)
data = [[1, 1, 0, 0], [1, 0, 0, 0], [0, 0, 3, 0], [0, 1, 0, 1]]
exp = Table(data, ['0', '1', 'x', 'z'], ['ABC', 'DEF', 'GHI', 'XYZ'],
input_is_dense=True)
self.assertEqual(obs, exp)
def test_make_otu_table_taxonomy(self):
"""make_otu_table should work with taxonomy"""
otu_map_lines = """0 ABC_0 DEF_1
1 ABC_1
x GHI_2 GHI_3 GHI_77
z DEF_3 XYZ_1""".split('\n')
taxonomy = {'0': ['Bacteria', 'Firmicutes'],
'x': ['Bacteria', 'Bacteroidetes']}
obs = make_otu_table(otu_map_lines, taxonomy)
data = [[1, 1, 0, 0], [1, 0, 0, 0], [0, 0, 3, 0], [0, 1, 0, 1]]
obs_md = [{'taxonomy': ['Bacteria', 'Firmicutes']},
{'taxonomy': ['None']},
{'taxonomy': ['Bacteria', 'Bacteroidetes']},
{'taxonomy': ['None']}]
exp = Table(data, ['0', '1', 'x', 'z'], ['ABC', 'DEF', 'GHI', 'XYZ'],
observation_metadata=obs_md, input_is_dense=True)
self.assertEqual(obs, exp)
def test_make_otu_table_with_sample_metadata(self):
# Want to make sure that the order of the sample IDs in the OTU
# map and the order of the IDs in the mapping file do not matter
otu_map_lines = """0 ABC_0 DEF_1
1 ABC_1
x GHI_2 GHI_3 GHI_77
z DEF_3 XYZ_1""".split('\n')
mapping_f = StringIO(MAPPING_FILE)
sample_ids = ['ABC', 'DEF', 'GHI', 'XYZ']
data = [[1, 1, 0, 0], [1, 0, 0, 0], [0, 0, 3, 0], [0, 1, 0, 1]]
map_data, map_header, map_comments = parse_mapping_file(mapping_f)
sample_metadata = mapping_file_to_dict(map_data, map_header)
sample_md = [sample_metadata[sample_id] for sample_id in sample_ids]
obs = make_otu_table(otu_map_lines, sample_metadata=sample_metadata)
exp = Table(data, ['0', '1', 'x', 'z'], sample_ids,
sample_metadata=sample_md, input_is_dense=True)
self.assertEqual(obs, exp)
# Test with a mapping file that is missing a sample's metadata,
# make sure it raises the KeyError
mapping_f = StringIO(MAPPING_FILE_MISSING_SAMPLE)
map_data, map_header, map_comments = parse_mapping_file(mapping_f)
sample_metadata = mapping_file_to_dict(map_data, map_header)
with self.assertRaises(KeyError):
obs = make_otu_table(otu_map_lines,
sample_metadata=sample_metadata)
MAPPING_FILE = """#SampleID BarcodeSequence LinkerPrimerSequence Description
ABC ATGC AAAAAA First Sample
XYZ TGCA AAAAAA Fourth Sample
GHI CATG AAAAAA Third Sample
DEF GCAT AAAAAA Second Sample
"""
MAPPING_FILE_MISSING_SAMPLE = """#SampleID BarcodeSequence LinkerPrimerSequence Description
ABC ATGC AAAAAA First Sample
XYZ TGCA AAAAAA Fourth Sample
DEF GCAT AAAAAA Second Sample
"""
if __name__ == '__main__':
main()
| gpl-2.0 | 984,590,734,029,470,100 | 35.507692 | 91 | 0.584703 | false |
recursecenter/RSVPBot | strings.py | 1 | 2577 | import config
ANNOUNCE_MESSAGE = """
**[{title}]({url})**
{timestamp}
Created by {created_by}
To start an RSVPBot thread for this event:
```{key_word} init {url}```
""".strip()
MSG_CREATE_EVENT_ON_RC_CALENDAR = """
RSVPBot events are saved on the RC calendar. To create an event that will be tracked in this thread, go here: %s/calendar/new?{}
""".strip() % config.rc_root
MSG_INIT_SUCCESSFUL = 'This thread is now an RSVPBot event for **[{}]({})**! Type `rsvp help` for more options.'
MSG_EVENT_MOVED = "This event has been moved to **[%s](%s)**!"
ERROR_INVALID_COMMAND = "`%s` is not a valid RSVPBot command! Type `rsvp help` for the correct syntax."
ERROR_NOT_AN_EVENT = "This thread is not an RSVPBot event! Type `rsvp init event-url` to make it into an event."
ERROR_ALREADY_AN_EVENT = "Oops! That thread is already an RSVPBot event!"
ERROR_MISSING_MOVE_DESTINATION = "`rsvp move` requires a Zulip stream URL destination (e.g. 'https://recurse.zulipchat.com/#narrow/stream/announce/topic/All.20Hands.20Meeting')"
ERROR_BAD_MOVE_DESTINATION = "%s is not a valid move destination URL! `rsvp move` requires a Zulip stream URL destination (e.g. 'https://recurse.zulipchat.com/#narrow/stream/announce/topic/All.20Hands.20Meeting') Type `rsvp help` for the correct syntax."
ERROR_MOVE_ALREADY_AN_EVENT = "Oops! %s is already an RSVPBot event!"
ERROR_EVENT_NOT_FOUND = "Oops! I couldn't find this event: {}"
ERROR_EVENT_ALREADY_INITIALIZED = "Oops! This event was already initialized here: {}"
ERROR_GOOGLE_CALENDAR_NO_LONGER_USED = "Oops! RSVPBot no longer uses Google Calendar, but it uses the [RC Calendar](%s/calendar) instead. This event can be found [here]({})." % config.rc_root
ERROR_FUNCTIONALITY_MOVED = "Oops! RSVPBot doesn't support `rsvp {}` directly anymore. You can now do this [on the RC calendar]({})!"
ERROR_RSVP_MAYBE_NOT_SUPPORTED = "Oops! `rsvp maybe` is no longer supported."
ERROR_CANNOT_INIT_IN_ANNOUNCE_THREAD = "Oops! You cannot `rsvp init` in the announce thread."
ERROR_SERVER_EXCEPTION = ":scream: Something went terribly wrong inside RSVPBot. If this keeps happening, please ping `@Faculty`!"
ERROR_NO_EVENT_ID = """
`rsvp init` must be passed an RC Calendar event ID or URL. For example:
```
rsvp init %s/calendar/123-my-event
```
""".strip() % config.rc_root
ERROR_THREAD_FROM_RC_ALREADY_AN_EVENT = """
Oops! Someone tried to create an event on the RC calendar using this thread, but it's already tracking an event.
Here's the event: **[{title}]({url})**
To start another RSVPBot thread for this event:
```rsvp init {url}```
""".strip()
| mit | 5,160,972,723,061,756,000 | 52.6875 | 254 | 0.719829 | false |
saymedia/flask-compress | flask_compress.py | 1 | 3222 | import gzip
try:
from io import BytesIO as IO
except ImportError:
    from StringIO import StringIO as IO
from flask import request
class Compress(object):
"""
The Compress object allows your application to use Flask-Compress.
When initialising a Compress object you may optionally provide your
:class:`flask.Flask` application object if it is ready. Otherwise,
you may provide it later by using the :meth:`init_app` method.
:param app: optional :class:`flask.Flask` application object
:type app: :class:`flask.Flask` or None
"""
def __init__(self, app=None):
"""
An alternative way to pass your :class:`flask.Flask` application
object to Flask-Compress. :meth:`init_app` also takes care of some
default `settings`_.
:param app: the :class:`flask.Flask` application object.
"""
self.app = app
if app is not None:
self.init_app(app)
    def init_app(self, app):
        self.app = app
defaults = [
('COMPRESS_MIMETYPES', ['text/html', 'text/css', 'text/xml',
'application/json',
'application/javascript']),
('COMPRESS_DEBUG', False),
('COMPRESS_LEVEL', 6),
('COMPRESS_MIN_SIZE', 500)
]
for k, v in defaults:
app.config.setdefault(k, v)
if app.config['COMPRESS_MIMETYPES']:
self.app.after_request(self.after_request)
def after_request(self, response):
# return the response untouched for responses that will never be
# gzipped, in any contexts.
if response.mimetype not in self.app.config['COMPRESS_MIMETYPES']:
return response
# At this point, always put the Vary header, even if the content
# is not gzipped in this particular context.
# Also, apparently, werkzeug has no documented method to "add", not "set", a header.
# So we rely on comma separated values.
if 'Vary' in response.headers and response.headers['Vary'] is not None and response.headers['Vary'] != "":
response.headers['Vary'] += ', Accept-Encoding'
else:
response.headers['Vary'] = 'Accept-Encoding'
if self.app.debug and not self.app.config['COMPRESS_DEBUG']:
return response
accept_encoding = request.headers.get('Accept-Encoding', '')
if 'gzip' not in accept_encoding.lower():
return response
response.direct_passthrough = False
if (response.status_code < 200 or
response.status_code >= 300 or
len(response.data) < self.app.config['COMPRESS_MIN_SIZE'] or
'Content-Encoding' in response.headers):
return response
level = self.app.config['COMPRESS_LEVEL']
gzip_buffer = IO()
gzip_file = gzip.GzipFile(mode='wb', compresslevel=level,
fileobj=gzip_buffer)
gzip_file.write(response.data)
gzip_file.close()
response.data = gzip_buffer.getvalue()
response.headers['Content-Encoding'] = 'gzip'
response.headers['Content-Length'] = len(response.data)
return response
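# A minimal usage sketch (illustrative only; the Flask app below is
# hypothetical, not part of this module):
#
#   from flask import Flask
#   from flask_compress import Compress
#
#   app = Flask(__name__)
#   Compress(app)          # or: compress = Compress(); compress.init_app(app)
#
# Responses whose mimetype is listed in COMPRESS_MIMETYPES and whose body is
# at least COMPRESS_MIN_SIZE bytes are then gzipped whenever the client sends
# an Accept-Encoding header containing "gzip".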
| mit | -5,863,244,114,132,079,000 | 33.645161 | 114 | 0.597455 | false |
eco32i/ggplot | ggplot/geoms/geom_abline.py | 1 | 1260 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
from matplotlib.dates import drange, date2num
from pandas.lib import Timestamp
from datetime import timedelta
import numpy as np
from .geom import geom
import pandas as pd
class geom_abline(geom):
VALID_AES = ['x', 'slope', 'intercept', 'color', 'linestyle', 'alpha', 'label']
def plot_layer(self, layer):
layer = dict((k, v) for k, v in layer.items() if k in self.VALID_AES)
layer.update(self.manual_aes)
if 'x' in layer:
x = layer.pop('x')
if 'slope' in layer:
slope = layer.pop('slope')
else:
slope = 1.0
if 'intercept' in layer:
intercept = layer.pop('intercept')
else:
intercept = 0.0
if isinstance(x[0], Timestamp):
gca = plt.gca()
gca.set_autoscale_on(False)
gca.plot(gca.get_xlim(),gca.get_ylim())
else:
start, stop = np.max(x), np.min(x)
step = ((stop-start)) / 100.0
x_rng = np.arange(start, stop, step)
y_rng = x_rng * slope + intercept
plt.plot(x_rng, y_rng, **layer)
| bsd-2-clause | -2,361,244,803,072,503,300 | 34 | 83 | 0.55873 | false |
qvazzler/Flexget | flexget/plugins/urlrewrite/isohunt.py | 1 | 3608 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote
import logging
import re
import feedparser
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.search import torrent_availability, normalize_unicode
log = logging.getLogger('isohunt')
class UrlRewriteIsoHunt(object):
"""IsoHunt urlrewriter and search plugin.
should accept:
isohunt: <category>
categories:
empty or -1: All
0 : Misc.
1 : Video/Movies
2 : Audio
3 : TV
4 : Games
5 : Apps
6 : Pics
7 : Anime
8 : Comics
9 : Books
10: Music Video
11: Unclassified
12: ALL
"""
schema = {
'type': 'string',
'enum': ['misc', 'movies', 'audio', 'tv', 'games', 'apps', 'pics', 'anime', 'comics', 'books', 'music video',
'unclassified', 'all']
}
def url_rewritable(self, task, entry):
url = entry['url']
# search is not supported
if url.startswith('http://isohunt.com/torrents/?ihq='):
return False
# not replaceable
if 'torrent_details' not in url:
return False
return url.startswith('http://isohunt.com') and url.find('download') == -1
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('torrent_details', 'download')
def search(self, task, entry, config):
# urllib.quote will crash if the unicode string has non ascii characters, so encode in utf-8 beforehand
optionlist = ['misc', 'movies', 'audio', 'tv', 'games', 'apps', 'pics', 'anime', 'comics', 'books',
'music video', 'unclassified', 'all']
entries = set()
search_strings = [normalize_unicode(s) for s in entry.get('search_strings', [entry['title']])]
for search_string in search_strings:
url = 'http://isohunt.com/js/rss/%s?iht=%s&noSL' % (
quote(search_string.encode('utf-8')), optionlist.index(config))
log.debug('requesting: %s' % url)
rss = feedparser.parse(url)
status = rss.get('status', False)
if status != 200:
raise plugin.PluginWarning('Search result not 200 (OK), received %s' % status)
ex = rss.get('bozo_exception', False)
if ex:
raise plugin.PluginWarning('Got bozo_exception (bad feed)')
for item in rss.entries:
entry = Entry()
entry['title'] = item.title
entry['url'] = item.link
m = re.search(r'Size: ([\d]+).*Seeds: (\d+).*Leechers: (\d+)', item.description, re.IGNORECASE)
if not m:
log.debug('regexp did not find seeds / peer data')
continue
else:
log.debug('regexp found size(%s), Seeds(%s) and Leeches(%s)' % (m.group(1), m.group(2), m.group(3)))
entry['content_size'] = int(m.group(1))
entry['torrent_seeds'] = int(m.group(2))
entry['torrent_leeches'] = int(m.group(3))
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
entries.add(entry)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteIsoHunt, 'isohunt', groups=['urlrewriter', 'search'], api_ver=2)
| mit | -2,577,430,881,180,603,000 | 33.361905 | 120 | 0.56541 | false |
standage/sequniq | sequniq/parse.py | 1 | 3833 | # -----------------------------------------------------------------------------
# Copyright (C) Daniel Standage, 2015. It is licensed under the ISC license,
# see LICENSE.txt. Contact: [email protected]
# -----------------------------------------------------------------------------
"""
Generators for parsing sequence data records in Fasta and Fastq.
"""
import sys
def get_parser(fastq=True, paired=True):
"""
Return a pointer to the correct parsing function based on the specified
format of the data.
"""
    # Note: the `fastq` flag shadows the module-level fastq() generator, so
    # the generator function is looked up explicitly to avoid returning the
    # boolean flag itself.
    fastq_func = globals()['fastq']
    if fastq:
        if paired:
            return fastq_paired
        else:
            return fastq_func
    else:
        if paired:
            return fasta_paired
        else:
            return fasta
def check_record(record, fastq=True, paired=True):
"""
"""
if fastq:
if paired:
assert len(record) == 6
else:
assert len(record) == 3
else:
if paired:
assert len(record) == 4
else:
assert len(record) == 2
def fasta(fp):
"""
Generator yields sequence records from Fasta files. Stolen shamelessly from
http://stackoverflow.com/a/7655072/459780.
"""
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name:
yield name, ''.join(seq)
name, seq = line, []
else:
seq.append(line)
if name:
yield name, ''.join(seq)
def fasta_paired(fp):
"""
Generator yields paired sequence records from Fasta files.
"""
defline_i, seq_i = None, None
for defline_j, seq_j in fasta(fp):
if seq_i is None:
defline_i, seq_i = defline_j, seq_j
else:
yield defline_i, seq_i, defline_j, seq_j
defline_i, seq_i = None, None
assert seq_i is None, 'paired Fasta data contains odd number of sequences'
def fastq(fp):
"""
Generator yields unpaired sequence records from Fastq files. Only supports
4-line Fastq format.
"""
linecount = 0
name, seq, qual = [None] * 3
for line in fp:
linecount += 1
if linecount % 4 == 1:
name = line.rstrip()
elif linecount % 4 == 2:
seq = line.rstrip()
elif linecount % 4 == 0:
qual = line.rstrip()
yield name, seq, qual
name, seq, qual = [None] * 3
def fastq_paired(fp):
"""
Generator yields paired sequence records from Fastq files. Only supports
4-line Fastq format with interleaved pairs.
"""
linecount = 0
name1, seq1, qual1, name2, seq2, qual2 = [None] * 6
for line in fp:
linecount += 1
if linecount % 8 == 1:
name1 = line.rstrip()
elif linecount % 8 == 2:
seq1 = line.rstrip()
elif linecount % 8 == 4:
qual1 = line.rstrip()
elif linecount % 8 == 5:
name2 = line.rstrip()
elif linecount % 8 == 6:
seq2 = line.rstrip()
elif linecount % 8 == 0:
qual2 = line.rstrip()
yield name1, seq1, qual1, name2, seq2, qual2
name1, seq1, qual1, name2, seq2, qual2 = [None] * 6
def write(record, outstream=sys.stdout):
"""
Write Fasta/Fastq records.
Records are tuples:
- 2 elements = unpaired Fasta
- 3 elements = unpaired Fastq
- 4 elements = paired Fasta
- 6 elements = paired Fastq
"""
if len(record) == 2:
fmt = '%s\n%s'
elif len(record) == 4:
fmt = '%s\n%s\n%s\n%s'
elif len(record) == 3:
fmt = '%s\n%s\n+\n%s'
elif len(record) == 6:
fmt = '%s\n%s\n+\n%s\n%s\n%s\n+\n%s'
else:
        raise Exception('record has %d elements' % len(record))
print >> outstream, fmt % record
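# A minimal usage sketch (illustrative; 'reads.fastq' is an invented path):
#
#   with open('reads.fastq') as fp:
#       parser = get_parser(fastq=True, paired=False)
#       for record in parser(fp):
#           check_record(record, fastq=True, paired=False)
#           write(record)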
| isc | 2,973,343,182,879,858,700 | 26.378571 | 79 | 0.516045 | false |
alienlike/courier | courier/models/account_link.py | 1 | 1782 | from datetime import datetime
from sqlalchemy import Column, Integer, ForeignKey, Boolean, DateTime
from sqlalchemy.orm import relationship, backref
from .base import DeclarativeBase
class AccountLink(DeclarativeBase):
# table
__tablename__ = 'account_link'
# columns
id = Column(Integer, primary_key=True, nullable=False)
peer_id = Column(Integer, ForeignKey('account_link.id'))
link_id = Column(Integer, ForeignKey('link.id', ondelete='CASCADE'), nullable=False)
from_account_id = Column(Integer, ForeignKey('account.id', ondelete='CASCADE'), nullable=False)
to_account_id = Column(Integer, ForeignKey('account.id', ondelete='CASCADE'), nullable=False)
created_date = Column(DateTime, nullable=False, default=datetime.now)
modified_date = Column(DateTime, nullable=False, default=datetime.now, onupdate=datetime.now)
hidden = Column(Boolean, nullable=False, default=False)
# relationships
peer = relationship('AccountLink', remote_side=[id], post_update=True)
link = relationship('Link',
backref=backref('account_links', lazy=True),
primaryjoin='Link.id==AccountLink.link_id')
from_account = relationship('Account',
backref=backref('account_links', lazy=True),
primaryjoin='Account.id==AccountLink.from_account_id')
to_account = relationship('Account',
primaryjoin='Account.id==AccountLink.to_account_id') # no backref
# constructor
def __init__(self, link, from_account, to_account):
self.link = link
self.from_account = from_account
self.to_account = to_account
self.hidden = False | gpl-3.0 | 4,681,192,711,699,085,000 | 45.921053 | 109 | 0.643659 | false |
svm-zhang/poolseq_tk | sz_acount.py | 1 | 3378 | '''
python poolseq_tk.py count
Description: Count alleles at each SNP given the pileups
Author: Simo V. Zhang
Input: pileup file with reads bases converted to corresponding alleles
Output: pileup file with allele counts
(1) chr
(2) pos
(3) ref base
(4) alt base
(5) allele counts in the order of ref and alt, separated by colon
'''
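# Example of a single output row (tab separated; the numbers are illustrative):
#   2L    1205    A    T    18:3:20:1
# i.e. chr, pos, ref base, alt base, then the colon-joined ref/alt counts
# collected from the input pileup files.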
import collections
import re
import sys
import os
import sz_utils
from colortext import ColorText
def run_count(args):
''' Counting alleles at each SNP in the given pileup files '''
dPos = {}
if args.pos:
ColorText().info("[poolseq_tk] reading SNPs positions:", "stderr")
with open(args.pos, 'r') as fPOS:
for line in fPOS:
tmp_line = line.strip().split("\t")
chr = tmp_line[0]
pos = int(tmp_line[1])
if (chr, pos) not in dPos:
dPos[chr, pos] = 1
ColorText().info(" %d\n" %(len(dPos)), "stderr")
else:
ColorText().info("[poolseq_tk] no SNP positions provided ... [skipped]\n", "stderr")
ac = collections.defaultdict(tuple)
for pileup in args.pileups:
sz_utils.check_if_files_exist(pileup)
nsnps = 0
ColorText().info("[poolseq_tk] counting alleles in %s:" %(os.path.basename(pileup)), "stderr")
with open(pileup, 'r') as fMPILEUP:
for line in fMPILEUP:
nsnps += 1
tmp_line = line.strip().split("\t")
chr = tmp_line[0]
pos = int(tmp_line[1])
if (((chr, pos) in dPos and args.pos) or
(len(dPos) == 0 and not args.pos)):
ref_base = tmp_line[2]
alt_base = tmp_line[3]
nRefAlleles, nAltAlleles = 0, 0
if len(tmp_line) == 5:
nRefAlleles = tmp_line[-1].count(ref_base) + \
tmp_line[-1].count(ref_base.lower())
nAltAlleles = tmp_line[-1].count(alt_base) + \
tmp_line[-1].count(alt_base.lower())
if (chr, pos) not in ac:
ac[chr, pos] = [ref_base, alt_base, str(nRefAlleles), str(nAltAlleles)]
else:
ac[chr, pos] += [str(nRefAlleles), str(nAltAlleles)]
ColorText().info(" %d SNPs parsed\n" %(nsnps), "stderr")
fOUT = None
if args.out == sys.stdout:
fOUT = sys.stdout
else:
sz_utils.make_dirs_if_necessary(args.out)
fOUT = open(args.out, 'w')
ColorText().info("[poolseq_tk] outputting allele counts to table ...", "stderr")
for k in sorted(ac.iterkeys()):
chr = k[0]
pos = k[1]
i = 2
if len(ac[k][i:]) == 2*len(args.pileups):
fOUT.write("%s\t%d\t%s" %(chr, pos, "\t".join(ac[k][0:2])))
while i <= len(ac[k])-4:
fOUT.write("\t%s" %(":".join(ac[k][i:i+4])))
i += 4
fOUT.write("\n")
ColorText().info(" [done]\n", "stderr")
fOUT.close()
def parseReadsBases(reads_bases, refBase, altBase):
i = 0
nRefAlleles, nAltAlleles = 0, 0
nOtherAlleles = 0
cov = 0
while i < len(reads_bases):
if reads_bases[i] == '.':
nRefAlleles += 1
i += 1
elif reads_bases[i] == ',':
nRefAlleles += 1
i += 1
elif reads_bases[i] == altBase:
nAltAlleles += 1
i += 1
elif reads_bases[i] == altBase.lower():
nAltAlleles += 1
i += 1
elif reads_bases[i] in ['+', '-', '*']:
if reads_bases[i] == '*':
i += 1
else:
len_indel = int(re.search(r'\d+', reads_bases[i+1:i+3]).group())
i += len_indel + len(str(len_indel)) + 1
elif reads_bases[i] == '^':
i += 2
elif reads_bases[i] in ['N', 'n', '$']:
i += 1
else:
nOtherAlleles += 1
i += 1
cov += 1
return cov, nRefAlleles, nAltAlleles, nOtherAlleles
| gpl-2.0 | -4,970,120,119,147,440,000 | 26.917355 | 96 | 0.598283 | false |
mbauskar/erpnext | erpnext/patches/v8_7/make_subscription_from_recurring_data.py | 1 | 1634 | # Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import today
def execute():
frappe.reload_doc('subscription', 'doctype', 'subscription')
frappe.reload_doc('selling', 'doctype', 'sales_order')
frappe.reload_doc('buying', 'doctype', 'purchase_order')
frappe.reload_doc('accounts', 'doctype', 'sales_invoice')
frappe.reload_doc('accounts', 'doctype', 'purchase_invoice')
for doctype in ['Sales Order', 'Sales Invoice',
		'Purchase Order', 'Purchase Invoice']:
for data in get_data(doctype):
make_subscription(doctype, data)
def get_data(doctype):
	return frappe.db.sql(""" select name, from_date, end_date, recurring_type, recurring_id,
next_date, notify_by_email, notification_email_address, recurring_print_format,
repeat_on_day_of_month, submit_on_creation
from `tab{0}` where is_recurring = 1 and next_date >= %s
""".format(doctype), today(), as_dict=1)
def make_subscription(doctype, data):
doc = frappe.get_doc({
'doctype': 'Subscription',
'reference_doctype': doctype,
'reference_document': data.name,
'start_date': data.from_date,
'end_date': data.end_date,
'frequency': data.recurring_type,
'repeat_on_day': data.repeat_on_day_of_month,
'notify_by_email': data.notify_by_email,
'recipients': data.notification_email_address,
'next_schedule_date': data.next_date,
'submit_on_creation': data.submit_on_creation
}).insert(ignore_permissions=True)
doc.submit()
if not doc.subscription:
frappe.db.set_value(doctype, data.name, "subscription", doc.name) | gpl-3.0 | -903,746,289,309,461,900 | 35.333333 | 87 | 0.72093 | false |
gmjosack/auditor | auditor/wsgi.py | 1 | 1406 | """
WSGI config for auditor project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | 7,586,960,774,314,234,000 | 42.9375 | 79 | 0.792319 | false |
ScienceWorldCA/domelights | backend/domeplayer/scripts/chaser-blue-fade.py | 1 | 1152 | import socket
import sys
import time
import random
import base64
""" Fading white chaser pattern """
HOST, PORT = "localhost", 9999
FIXTURES = 260
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
canvas = []
for i in range(0,FIXTURES*3):
canvas.append(0)
fp = open( 'running', 'w' )
fp.write( '' )
fp.close()
running = True
while running:
for fixture in range( 0, FIXTURES ):
for i in range( 0, FIXTURES*3 ):
if i % 3 != 2:
canvas[i] = 0
if canvas[i] > 0:
canvas[i] = canvas[i] - 1
r_pixel = (fixture*3)
g_pixel = r_pixel + 1
b_pixel = g_pixel + 1
canvas[r_pixel] = 255 ### random.randint(0,255)
canvas[g_pixel] = 255 ### random.randint(0,255)
canvas[b_pixel] = 255 ### random.randint(0,255)
data = ''
for j in range(0,len(canvas)):
data = data + chr(canvas[j]) ## Blue
try:
sock.send(data)
except socket.error as msg:
print msg
break
time.sleep(0.0225)
## Check if we're still running
fp = open( 'running', 'r' )
inp = fp.read().strip()
if inp == "STOP":
running = False
fp.close()
sock.close()
| apache-2.0 | 6,400,051,492,977,337,000 | 18.525424 | 56 | 0.594618 | false |
PaddlePaddle/models | PaddleCV/image_classification/fast_imagenet/reader.py | 1 | 6775 | #copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import math
import random
import pickle
from tqdm import tqdm
import time
import multiprocessing
import transforms
import datasets
FINISH_EVENT = "FINISH_EVENT"
class PaddleDataLoader(object):
def __init__(self,
dataset,
indices=None,
concurrent=24,
queue_size=3072,
shuffle=True,
shuffle_seed=0):
self.dataset = dataset
self.indices = indices
self.concurrent = concurrent
self.shuffle = shuffle
self.shuffle_seed = shuffle_seed
self.queue_size = queue_size // self.concurrent
def _worker_loop(self, queue, worker_indices, worker_id):
cnt = 0
for idx in worker_indices:
cnt += 1
img, label = self.dataset[idx]
img = np.array(img).astype('uint8').transpose((2, 0, 1))
queue.put((img, label))
print("worker: [%d] read [%d] samples. " % (worker_id, cnt))
queue.put(FINISH_EVENT)
def reader(self):
def _reader_creator():
worker_processes = []
index_queues = []
total_img = len(self.dataset)
print("total image: ", total_img)
if self.shuffle:
self.indices = [i for i in xrange(total_img)]
random.seed(self.shuffle_seed)
random.shuffle(self.indices)
print("shuffle indices: %s ..." % self.indices[:10])
imgs_per_worker = int(math.ceil(total_img / self.concurrent))
for i in xrange(self.concurrent):
start = i * imgs_per_worker
end = (i + 1
) * imgs_per_worker if i != self.concurrent - 1 else None
sliced_indices = self.indices[start:end]
index_queue = multiprocessing.Queue(self.queue_size)
w = multiprocessing.Process(
target=self._worker_loop,
args=(index_queue, sliced_indices, i))
w.daemon = True
w.start()
worker_processes.append(w)
index_queues.append(index_queue)
finish_workers = 0
worker_cnt = len(worker_processes)
recv_index = 0
while finish_workers < worker_cnt:
while (index_queues[recv_index].empty()):
recv_index = (recv_index + 1) % self.concurrent
sample = index_queues[recv_index].get()
recv_index = (recv_index + 1) % self.concurrent
if sample == FINISH_EVENT:
finish_workers += 1
else:
yield sample
return _reader_creator
def train(traindir, sz, min_scale=0.08, shuffle_seed=0):
train_tfms = [
transforms.RandomResizedCrop(
sz, scale=(min_scale, 1.0)), transforms.RandomHorizontalFlip()
]
train_dataset = datasets.ImageFolder(traindir,
transforms.Compose(train_tfms))
return PaddleDataLoader(train_dataset, shuffle_seed=shuffle_seed).reader()
def test(valdir, bs, sz, rect_val=False):
if rect_val:
idx_ar_sorted = sort_ar(valdir)
idx_sorted, _ = zip(*idx_ar_sorted)
idx2ar = map_idx2ar(idx_ar_sorted, bs)
ar_tfms = [transforms.Resize(int(sz * 1.14)), CropArTfm(idx2ar, sz)]
val_dataset = ValDataset(valdir, transform=ar_tfms)
return PaddleDataLoader(
val_dataset, concurrent=1, indices=idx_sorted,
shuffle=False).reader()
val_tfms = [transforms.Resize(int(sz * 1.14)), transforms.CenterCrop(sz)]
val_dataset = datasets.ImageFolder(valdir, transforms.Compose(val_tfms))
return PaddleDataLoader(val_dataset).reader()
class ValDataset(datasets.ImageFolder):
def __init__(self, root, transform=None, target_transform=None):
super(ValDataset, self).__init__(root, transform, target_transform)
def __getitem__(self, index):
path, target = self.imgs[index]
sample = self.loader(path)
if self.transform is not None:
for tfm in self.transform:
if isinstance(tfm, CropArTfm):
sample = tfm(sample, index)
else:
sample = tfm(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
class CropArTfm(object):
def __init__(self, idx2ar, target_size):
self.idx2ar, self.target_size = idx2ar, target_size
def __call__(self, img, idx):
target_ar = self.idx2ar[idx]
if target_ar < 1:
w = int(self.target_size / target_ar)
size = (w // 8 * 8, self.target_size)
else:
h = int(self.target_size * target_ar)
size = (self.target_size, h // 8 * 8)
return transforms.center_crop(img, size)
def sort_ar(valdir):
idx2ar_file = valdir + '/../sorted_idxar.p'
if os.path.isfile(idx2ar_file):
return pickle.load(open(idx2ar_file, 'rb'))
print(
'Creating AR indexes. Please be patient this may take a couple minutes...'
)
val_dataset = datasets.ImageFolder(
valdir) # AS: TODO: use Image.open instead of looping through dataset
sizes = [img[0].size for img in tqdm(val_dataset, total=len(val_dataset))]
idx_ar = [(i, round(s[0] * 1.0 / s[1], 5)) for i, s in enumerate(sizes)]
sorted_idxar = sorted(idx_ar, key=lambda x: x[1])
pickle.dump(sorted_idxar, open(idx2ar_file, 'wb'))
print('Done')
return sorted_idxar
def chunks(l, n):
n = max(1, n)
return (l[i:i + n] for i in range(0, len(l), n))
def map_idx2ar(idx_ar_sorted, batch_size):
ar_chunks = list(chunks(idx_ar_sorted, batch_size))
idx2ar = {}
for chunk in ar_chunks:
idxs, ars = list(zip(*chunk))
mean = round(np.mean(ars), 5)
for idx in idxs:
idx2ar[idx] = mean
return idx2ar
| apache-2.0 | 5,752,484,946,407,509,000 | 34.103627 | 82 | 0.585387 | false |
binary-array-ld/bald | lib/bald/tests/integration/test_hdf_graph.py | 1 | 2434 | import os
import unittest
import h5py
import numpy as np
import bald
from bald.tests import BaldTestCase
def _fattrs(f):
f.attrs['rdf__type'] = 'bald__Container'
group_pref = f.create_group('bald_prefix_list')
group_pref.attrs['bald__'] = 'https://www.opengis.net/def/binary-array-ld/'
group_pref.attrs['rdf__'] = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
group_pref.attrs['skos__'] = 'http://www.w3.org/2004/02/skos/core#'
f.attrs['bald__isPrefixedBy'] = group_pref.ref
return f
def _create_parent_child(container, pname, pshape, cname, cshape):
dsetp = container.create_dataset(pname, pshape, dtype='i')
dsetc = container.create_dataset(cname, cshape, dtype='i')
dsetp.attrs['rdf__type'] = 'bald__Array'
dsetp.attrs['bald__references'] = dsetc.ref
dsetc.attrs['rdf__type'] = 'bald__Array'
dsetc.attrs['rdf__type'] = 'bald__Reference'
dsetc.attrs['bald__array'] = dsetc.ref
return container
class TestHDFGraph(BaldTestCase):
def setUp(self):
self.html_path = os.path.join(os.path.dirname(__file__), 'HTML')
def test_match(self):
with self.temp_filename('.hdf') as tfile:
f = h5py.File(tfile, "w")
f = _fattrs(f)
f = _create_parent_child(f, 'data', (11, 17), 'alocation', (11, 17))
group_d = f.create_group('discovery')
group_s = group_d.create_group('source')
group_r = f.create_group('referencing')
_create_parent_child(group_d, 'apair', (2,), 'anotherpair', (2,))
inst = group_s.create_dataset('institution', ())
inst.attrs['skos__prefLabel'] = 'a quality establishment'
sref = group_r.create_dataset('locref', ())
sref.attrs['skos__prefLabel'] = 'for locational purposes'
sref2 = group_r.create_dataset('locref2', ())
sref2.attrs['skos__prefLabel'] = 'for more locational purposes'
f['alocation'].attrs['bald__references'] = np.array([sref.ref, sref2.ref],
dtype=h5py.special_dtype(ref=h5py.Reference))
f.close()
root_container = bald.load_hdf5(tfile, cache=self.acache)
html = root_container.viewgraph()
# with open(os.path.join(self.html_path, 'hdf_container_nest.html'), 'w') as sf:
# sf.write(html)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,929,595,878,869,489,400 | 40.254237 | 108 | 0.591208 | false |
pculture/unisubs | apps/teams/signals.py | 1 | 7525 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import logging
from django import dispatch
logger = logging.getLogger(__name__)
member_leave = dispatch.Signal()
member_remove = dispatch.Signal()
video_removed_from_team = dispatch.Signal(providing_args=["team", "user"])
video_moved_from_team_to_team = dispatch.Signal(
providing_args=["destination_team", "old_team", "video"])
video_moved_from_project_to_project = dispatch.Signal(
providing_args=["old_project", "new_project", "video"])
team_settings_changed = dispatch.Signal(
providing_args=["user", "changed_settings", "old_settings"])
# Called when we're creating forms for the team manage videos page. The
# sender will be the team. Append new forms to the form_list parameter
build_video_management_forms = dispatch.Signal(providing_args=['form_list'])
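#
# A sketch of a hypothetical receiver for this signal (MyVideoForm is an
# invented class used only for illustration):
#
#   @dispatch.receiver(build_video_management_forms)
#   def add_my_form(sender, form_list, **kwargs):
#       form_list.append(MyVideoForm)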
# Notification-related signals
# There is quite a bit of indirection here, but the goal is to make
# dispatching these events as simple as possible, since it might occur
# in multiple places.
#
# 1) Client code dispatches a signal listed in this module:
#    ex: signals.api_subtitles_edited.send(subtitle_version)
# 2) The signal calls that handler, which chooses the right event name
# for the signal and calls the matching sub method (for videos, languages, etc)
# 3) The submethod finds all teams that should be notified (since a video
# can belong to more than one team). For each team:
# 3a) Puts the right task on the queue, if the team has a TeamNotificationSetting
# 3b) The task queries the TeamNotificationSetting models to fire notifications
# 3c) The TNS checks if there is available data (e.g. which url to post to)
# 3d) Instantiates the right notification class (since specific partners must
# have their notification data massaged to their needs - e.g. changing the video
# ids to their own, or the api links to their own endpoints)
# 3e) The notification class fires the notification
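#
# A sketch of step 1 as seen from a hypothetical call site elsewhere in the
# codebase (the variable name below is invented):
#
#   from teams import signals
#   signals.api_subtitles_edited.send(new_subtitle_version)
#
# which runs api_on_subtitles_edited() below and queues
# teams.tasks.api_notify_on_subtitles_activity for every interested team.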
def _teams_to_notify(video):
"""
Returns a list of teams to be notified of events releated to this
video.
"""
from teams.models import Team
from django.db.models import Q
return list(Team.objects.filter(
Q(notification_settings__isnull=False) |
Q(partner__notification_settings__isnull=False),
teamvideo__video=video))
def _execute_video_task(video, event_name):
from teams import tasks as team_tasks
from teams.models import TeamVideo
from django.db.models import Q
logger.info("notification: %s (video: %s)", event_name, video)
tvs = list(TeamVideo.objects.filter(
Q(team__notification_settings__isnull=False) |
Q(team__partner__notification_settings__isnull=False),
video=video))
for tv in tvs:
team_tasks.api_notify_on_video_activity.delay(
tv.team.pk,
event_name,
tv.video.video_id)
def _execute_language_task(language, event_name):
from teams import tasks as team_tasks
logger.info("notification: %s (language: %s)", event_name, language)
video = language.video
teams = _teams_to_notify(video)
for team in teams:
team_tasks.api_notify_on_language_activity.delay(
team.pk,
event_name,
language.pk)
def _execute_version_task(version, event_name):
from teams import tasks as team_tasks
logger.info("notification: %s (version: %s)", event_name, version)
video = version.video
teams = _teams_to_notify(video)
for team in teams:
team_tasks.api_notify_on_subtitles_activity.delay(
team.pk,
event_name,
version.pk)
def _execute_application_task(application, event_name):
from teams.tasks import api_notify_on_application_activity
api_notify_on_application_activity.delay(
application.team.pk,
event_name,
application.pk,
)
def api_on_subtitles_edited(sender, **kwargs):
from teams.models import TeamNotificationSetting
_execute_version_task(sender, TeamNotificationSetting.EVENT_SUBTITLE_NEW)
def api_on_subtitles_approved(sender, **kwargs):
from teams.models import TeamNotificationSetting
_execute_version_task(sender, TeamNotificationSetting.EVENT_SUBTITLE_APPROVED)
def api_on_subtitles_rejected(sender, **kwargs):
from teams.models import TeamNotificationSetting
_execute_version_task(sender, TeamNotificationSetting.EVENT_SUBTITLE_REJECTED)
def api_on_language_edited(sender, **kwargs):
from teams.models import TeamNotificationSetting
_execute_language_task(sender, TeamNotificationSetting.EVENT_LANGUAGE_EDITED)
def api_on_language_new(sender, **kwargs):
from teams.models import TeamNotificationSetting
_execute_language_task(sender, TeamNotificationSetting.EVENT_LANGUAGE_NEW)
def api_on_video_edited(sender, **kwargs):
from teams.models import TeamNotificationSetting
_execute_video_task(sender, TeamNotificationSetting.EVENT_VIDEO_EDITED)
def api_on_teamvideo_new(sender, **kwargs):
from teams import tasks as team_tasks
from teams.models import TeamNotificationSetting
return team_tasks.api_notify_on_video_activity.delay(
sender.team.pk,
TeamNotificationSetting.EVENT_VIDEO_NEW,
sender.video.video_id )
def api_on_application_new(sender, **kwargs):
from teams.models import TeamNotificationSetting
return _execute_application_task(sender, TeamNotificationSetting.EVENT_APPLICATION_NEW)
def api_on_language_deleted(sender, **kwargs):
from teams.models import TeamNotificationSetting
return _execute_language_task(
sender, TeamNotificationSetting.EVENT_LANGUAGE_DELETED)
#: Actual available signals
api_subtitles_edited = dispatch.Signal(providing_args=["version"])
api_subtitles_approved = dispatch.Signal(providing_args=["version"])
api_subtitles_rejected = dispatch.Signal(providing_args=["version"])
api_language_edited = dispatch.Signal(providing_args=["language"])
api_language_deleted = dispatch.Signal()
api_video_edited = dispatch.Signal(providing_args=["video"])
api_language_new = dispatch.Signal(providing_args=["language"])
api_teamvideo_new = dispatch.Signal(providing_args=["video"])
api_application_new = dispatch.Signal(providing_args=["application"])
# connect handlers
api_subtitles_edited.connect(api_on_subtitles_edited)
api_subtitles_approved.connect(api_on_subtitles_approved)
api_subtitles_rejected.connect(api_on_subtitles_rejected)
api_language_edited.connect(api_on_language_edited)
api_language_new.connect(api_on_language_new)
api_language_deleted.connect(api_on_language_deleted)
api_video_edited.connect(api_on_video_edited)
api_teamvideo_new.connect(api_on_teamvideo_new)
api_application_new.connect(api_on_application_new)
| agpl-3.0 | -2,416,420,001,652,783,600 | 40.346154 | 91 | 0.732359 | false |
robwarm/gpaw-symm | gpaw/test/cmrtest/cmr_test4.py | 1 | 1281 | # This test makes sure that the i/o interfaces work with CMR.
# CMR itself does not have to be installed for this test.
#
# The reason why CMR cannot use direct writes to DB/GPAW files is that
# GPAW cannot always write a GPAW file without performing a new calculation, e.g.
# GPAW(filename).write(...)
# fails in some rare cases.
import os
from ase import Atom, Atoms
from ase.calculators.emt import EMT
import warnings
# cmr calls all available methods in ase.atoms detected by the module inspect.
# Therefore also deprecated methods are called - and we choose to silence those warnings.
warnings.filterwarnings('ignore', 'ase.atoms.*deprecated',)
import cmr
# from cmr.tools.log import Log
# cmr.logger.set_message_selection(Log.MSG_TYPE_ALL)
a = 4.05
d = a / 2 ** 0.5
bulk = Atoms([Atom('Al', (0, 0, 0)),
Atom('Al', (0.5, 0.5, 0.5))],
pbc=True)
bulk.set_cell((d, d, a), scale_atoms=True)
h = 0.3
bulk.set_calculator(EMT())
e0 = bulk.get_potential_energy()
bulk.write("cmr_test4.traj")
bulk.write("cmr_test4a.cmr")
cmr.convert({"input":"cmr_test4.traj", "output":"cmr_test4.cmr"})
data = cmr.read("cmr_test4.cmr")
data.dump()
group = cmr.create_group()
group.add(data)
group.write("cmr_group4.cmr")
g = cmr.read("cmr_group4.cmr")
g.dump_all()
| gpl-3.0 | -6,507,146,153,682,806,000 | 27.466667 | 89 | 0.69477 | false |
hiroara/remotefile-python | spec/s3_file_spec.py | 1 | 4783 | from remotefile import S3File, RemoteFile
from expects import *
from tempfile import TemporaryDirectory, NamedTemporaryFile
from unittest.mock import patch, MagicMock
import os, re
from boto.s3.bucket import Bucket
from boto.s3.key import Key
with description(S3File):
with before.each:
self.sample_url = 's3://ari-hiro.com/example/remotefile-python/test.txt'
self.cache_dir = TemporaryDirectory()
self.region = 'ap-northeast-1'
self.remote_file = RemoteFile.build(self.sample_url, cache_dir=self.cache_dir.name, region_name=self.region)
self.bucket = Bucket()
self.bucket.get_key = MagicMock('get_key')
self.mocked_obj = Key(self.remote_file.url.netloc, self.remote_file.url.path)
with after.each:
self.cache_dir.cleanup()
    with it('should be recognized as an S3 file'):
expect(self.remote_file.is_s3_file()).to(be_true)
expect(self.remote_file.is_http_file()).to(be_false)
expect(self.remote_file.is_local_file()).to(be_false)
with description('get_local_path method'):
with it('should return url'):
under_cache_path = self.remote_file.get_local_path().split(self.cache_dir.name)[1]
expected_path = os.path.join('/', self.region, self.remote_file.url.netloc, re.sub('^/', '', self.remote_file.url.path))
expect(under_cache_path).to(equal(expected_path))
with description('exists method'):
with context('when file exists'):
with before.each:
self.bucket.get_key.return_value = self.mocked_obj
with it('should return True'):
with patch.object(S3File, '_S3File__get_s3_bucket', return_value=self.bucket):
expect(self.remote_file.exists()).to(be_true)
with context('when file does not exist'):
with before.each:
self.bucket.get_key.return_value = None
with it('should return False'):
with patch.object(S3File, '_S3File__get_s3_bucket', return_value=self.bucket):
expect(self.remote_file.exists()).to(be_false)
with description('download method'):
with context('when file exists'):
with before.each:
self.bucket.get_key.return_value = self.mocked_obj
with it('should download to local and return True'):
with patch.object(S3File, '_S3File__get_s3_bucket', return_value=self.bucket):
with patch.object(self.mocked_obj, 'get_contents_to_file') as getter:
expect(self.remote_file.download()).to(be_true)
expect(getter.called).to(be_true)
first_arg = getter.call_args[0][0].name
expect(first_arg).to(equal(self.remote_file.get_file_path()))
with context('when file does not exist'):
with before.each:
self.bucket.get_key.return_value = None
with it('should return False'):
with patch.object(S3File, '_S3File__get_s3_bucket', return_value=self.bucket):
with patch.object(self.mocked_obj, 'get_contents_to_file') as getter:
expect(self.remote_file.download()).to(be_false)
expect(getter.called).to(be_false)
with description('upload method'):
with context('when src file exists'):
with before.each:
self.src_temp_file = NamedTemporaryFile()
with open(self.src_temp_file.name, 'w') as f: f.write('Some content')
self.src_file = RemoteFile.build(self.src_temp_file.name)
with it('should upload to provided url'):
with patch.object(S3File, '_S3File__get_s3_bucket', return_value=self.bucket):
with patch.object(Key, 'set_contents_from_filename') as setter:
expect(self.remote_file.upload(self.src_file)).to(be_true)
expect(setter.called).to(be_true)
first_arg = setter.call_args[0][0]
expect(first_arg).to(equal(self.src_file.get_file_path()))
with context('when src file does not exist'):
with before.each:
self.src_temp_file = NamedTemporaryFile()
self.src_temp_file.close() # delete
self.src_file = RemoteFile.build(self.src_temp_file.name)
            with it('should not upload'):
with patch.object(S3File, '_S3File__get_s3_bucket', return_value=self.bucket) as get_s3_bucket:
expect(self.remote_file.upload(self.src_file)).to(be_false)
expect(get_s3_bucket.called).to(be_false)
| mit | 7,604,493,952,905,030,000 | 44.552381 | 132 | 0.595024 | false |
specify/specify7 | specifyweb/workbench/upload/tests/test_bugs.py | 1 | 8187 |
import io
import json
import csv
from pprint import pprint
from unittest import skip
from datetime import datetime
from decimal import Decimal
from ..uploadable import Exclude
from ..upload_result import Uploaded, UploadResult, Matched, FailedBusinessRule, ReportInfo, TreeInfo
from ..upload_table import UploadTable, ScopedUploadTable, _to_many_filters_and_excludes, BoundUploadTable
from ..treerecord import TreeRecord, TreeDefItemWithParseResults
from ..upload import do_upload_csv
from ..upload_plan_schema import parse_plan
from .base import UploadTestsBase, get_table
class BugTests(UploadTestsBase):
def test_duplicate_refworks(self) -> None:
""" Andy found that duplicate reference works were being created from data similar to the following. """
reader = csv.DictReader(io.StringIO(
'''Catalog number,Type,Title,Volume,Pages,Date,DOI,URL,Author last name 1,Author first name 1,Author MI 1,Author last name 2,Author first name 2,Author MI 2,Author last name 3,Author first name 3,Author MI 3
10026,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
10168,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
10194,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
10199,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
10206,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
1861,1,pearl,1686,1-28,2008,10.11646/zootaxa.1686.1.1,https://doi.org/10.11646/zootaxa.1686.1.1,Conway,Kevin,W,Chen,,Wei-Jen,Mayden,Richard,L
5311,1,pearl,1686,1-28,2008,10.11646/zootaxa.1686.1.1,https://doi.org/10.11646/zootaxa.1686.1.1,Conway,Kevin,W,Chen,,Wei-Jen,Mayden,Richard,L
5325,1,pearl,1686,1-28,2008,10.11646/zootaxa.1686.1.1,https://doi.org/10.11646/zootaxa.1686.1.1,Conway,Kevin,W,Chen,,Wei-Jen,Mayden,Richard,L
5340,1,nepal,1047,1-19,2005,10.11646/zootaxa.1047.1.1,https://doi.org/10.11646/zootaxa.1047.1.1,Ng,Heok,H,Edds,David,R,,,
5362,1,nepal,1047,1-19,2005,10.11646/zootaxa.1047.1.1,https://doi.org/10.11646/zootaxa.1047.1.1,Ng,Heok,H,Edds,David,R,,,
5282,1,nepal,1047,1-19,2005,10.11646/zootaxa.1047.1.1,https://doi.org/10.11646/zootaxa.1047.1.1,Ng,Heok,H,Edds,David,R,,,
5900,1,nepal,1047,1-19,2005,10.11646/zootaxa.1047.1.1,https://doi.org/10.11646/zootaxa.1047.1.1,Ng,Heok,H,Edds,David,R,,,
6527,1,Centrum,44,721-732,2007,10.1139/e06-137,https://doi.org/10.1139/e06-137,Newbrey,Michael,G,Wilson,Mark,VH,Ashworth,Allan,C
7350,1,Centrum,44,721-732,2007,10.1139/e06-137,https://doi.org/10.1139/e06-137,Newbrey,Michael,G,Wilson,Mark,VH,Ashworth,Allan,C
7357,1,Centrum,44,721-732,2007,10.1139/e06-137,https://doi.org/10.1139/e06-137,Newbrey,Michael,G,Wilson,Mark,VH,Ashworth,Allan,C
7442,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
7486,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
7542,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
7588,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
7602,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
'''))
expected = [
Uploaded, # 10026,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
Matched, # 10168,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
Matched, # 10194,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
Matched, # 10199,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
Matched, # 10206,1,catfish,282,315,1969,10.5479/si.03629236.282.1,https://doi.org/10.5479/si.03629236.282.1,Taylor,William,R,,,,,,
Uploaded, # 1861,1,pearl,1686,1-28,2008,10.11646/zootaxa.1686.1.1,https://doi.org/10.11646/zootaxa.1686.1.1,Conway,Kevin,W,Chen,,Wei-Jen,Mayden,Richard,L
Matched, # 5311,1,pearl,1686,1-28,2008,10.11646/zootaxa.1686.1.1,https://doi.org/10.11646/zootaxa.1686.1.1,Conway,Kevin,W,Chen,,Wei-Jen,Mayden,Richard,L
Matched, # 5325,1,pearl,1686,1-28,2008,10.11646/zootaxa.1686.1.1,https://doi.org/10.11646/zootaxa.1686.1.1,Conway,Kevin,W,Chen,,Wei-Jen,Mayden,Richard,L
Uploaded, # 5340,1,nepal,1047,1-19,2005,10.11646/zootaxa.1047.1.1,https://doi.org/10.11646/zootaxa.1047.1.1,Ng,Heok,H,Edds,David,R,,,
Matched, # 5362,1,nepal,1047,1-19,2005,10.11646/zootaxa.1047.1.1,https://doi.org/10.11646/zootaxa.1047.1.1,Ng,Heok,H,Edds,David,R,,,
Matched, # 5282,1,nepal,1047,1-19,2005,10.11646/zootaxa.1047.1.1,https://doi.org/10.11646/zootaxa.1047.1.1,Ng,Heok,H,Edds,David,R,,,
Matched, # 5900,1,nepal,1047,1-19,2005,10.11646/zootaxa.1047.1.1,https://doi.org/10.11646/zootaxa.1047.1.1,Ng,Heok,H,Edds,David,R,,,
Uploaded, # 6527,1,Centrum,44,721-732,2007,10.1139/e06-137,https://doi.org/10.1139/e06-137,Newbrey,Michael,G,Wilson,Mark,VH,Ashworth,Allan,C
Matched, # 7350,1,Centrum,44,721-732,2007,10.1139/e06-137,https://doi.org/10.1139/e06-137,Newbrey,Michael,G,Wilson,Mark,VH,Ashworth,Allan,C
Matched, # 7357,1,Centrum,44,721-732,2007,10.1139/e06-137,https://doi.org/10.1139/e06-137,Newbrey,Michael,G,Wilson,Mark,VH,Ashworth,Allan,C
Uploaded, # 7442,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
Matched, # 7486,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
Matched, # 7542,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
Matched, # 7588,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
Matched, # 7602,1,The Clupeocephala,45,635-657,2010,10.4067/S0718-19572010000400009,https://doi.org/10.4067/S0718-19572010000400009,Arratia,Gloria,,,,,,,
]
plan = parse_plan(self.collection, json.loads('''
{
"baseTableName": "referencework",
"uploadable": {
"uploadTable": {
"wbcols": {
"referenceworktype": "Type",
"title": "Title"
},
"static": {},
"toOne": {},
"toMany": {
"authors": [
{
"wbcols": {},
"static": {},
"toOne": {
"agent": {
"uploadTable": {
"wbcols": {
"lastname": "Author last name 1"
},
"static": {"agenttype": 1},
"toOne": {},
"toMany": {}
}
}
}
},
{
"wbcols": {},
"static": {},
"toOne": {
"agent": {
"uploadTable": {
"wbcols": {
"lastname": "Author last name 2"
},
"static": {"agenttype": 1},
"toOne": {},
"toMany": {}
}
}
}
},
{
"wbcols": {},
"static": {},
"toOne": {
"agent": {
"uploadTable": {
"wbcols": {
"lastname": "Author last name 3"
},
"static": {"agenttype": 1},
"toOne": {},
"toMany": {}
}
}
}
}
]
}
}
}
}
'''))
upload_results = do_upload_csv(self.collection, reader, plan.apply_scoping(self.collection), self.agent.id)
rr = [r.record_result.__class__ for r in upload_results]
self.assertEqual(expected, rr)
| gpl-2.0 | 6,606,023,491,129,068,000 | 55.854167 | 207 | 0.671064 | false |
perplexes/couchapp | python/couchapp/utils/__init__.py | 1 | 2821 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Benoit Chesneau <[email protected]>
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import codecs
import os
import sys
import urlparse
import urllib
# compatibility with python 2.4
try:
from hashlib import md5 as _md5
except ImportError:
import md5
_md5 = md5.new
try:
import json
except ImportError:
import simplejson as json
def in_couchapp():
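    # Walk up from the current working directory until a directory containing
    # '.couchapprc' is found; return that path, or False once the directory
    # listing stops changing (i.e. the filesystem root has been reached).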
current_path = os.getcwd()
old_dirs = []
while 1:
dirs = os.listdir(current_path)
if dirs == old_dirs:
return False
if '.couchapprc' in dirs: break
current_path = os.path.normpath(os.path.join(current_path, '../'))
old_dirs = dirs
return current_path
def parse_uri(string):
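    # Split an http(s) URL into (server_uri, dbname, docid): path components
    # before a '_design' segment form the database name, and everything from
    # '_design' onwards becomes the document id.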
parts = urlparse.urlsplit(urllib.unquote(string))
if parts[0] != 'http' and parts[0] != 'https':
raise ValueError('Invalid dbstring')
path = parts[2].strip('/').split('/')
dbname = ''
docid = ''
if len(path) >= 1:
db_parts=[]
i = 0
while 1:
try:
p = path[i]
except IndexError:
break
if p == '_design': break
db_parts.append(p)
i = i + 1
dbname = '/'.join(db_parts)
if i < len(path) - 1:
docid = '/'.join(path[i:])
server_uri = '%s://%s' % (parts[0], parts[1])
return server_uri, dbname, docid
def parse_auth(string):
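    # Extract (username, password, server_uri) from a URL of the form
    # scheme://user:password@host/...; password defaults to '' when absent.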
parts = urlparse.urlsplit(urllib.unquote(string))
server_parts = parts[1].split('@')
if ":" in server_parts[0]:
username, password = server_parts[0].split(":")
else:
username = server_parts[0]
password = ''
server_uri = "%s://%s" % (parts[0], server_parts[1])
return username, password, server_uri
def get_appname(docid):
return docid.split('_design/')[1]
def read_file(fname):
f = codecs.open(fname, 'rb', "utf-8")
data = f.read()
f.close()
return data
def sign_file(file_path):
if os.path.isfile(file_path):
f = open(file_path, 'rb')
content = f.read()
f.close()
return _md5(content).hexdigest()
return ''
def write_content(filename, content):
f = open(filename, 'wb')
f.write(content)
    f.close()
def write_json(filename, content):
write_content(filename, json.dumps(content))
def read_json(filename):
try:
data = read_file(filename)
except IOError, e:
if e[0] == 2:
return {}
raise
try:
data = json.loads(data)
except ValueError:
print >>sys.stderr, "Json is invalid, can't load %s" % filename
return {}
return data
| apache-2.0 | 5,177,487,938,545,326,000 | 21.934959 | 74 | 0.56682 | false |
axbaretto/beam | sdks/python/apache_beam/transforms/external_test_it.py | 1 | 2343 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration tests for cross-language transform expansion."""
# pytype: skip-file
from __future__ import absolute_import
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam import Pipeline
from apache_beam.runners.portability import expansion_service
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import ptransform
class ExternalTransformIT(unittest.TestCase):
@attr('IT')
def test_job_python_from_python_it(self):
@ptransform.PTransform.register_urn('simple', None)
class SimpleTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll | beam.Map(lambda x: 'Simple(%s)' % x)
def to_runner_api_parameter(self, unused_context):
return 'simple', None
@staticmethod
def from_runner_api_parameter(_1, _2):
return SimpleTransform()
pipeline = TestPipeline(is_integration_test=True)
res = (
pipeline
| beam.Create(['a', 'b'])
| beam.ExternalTransform(
'simple', None, expansion_service.ExpansionServiceServicer()))
assert_that(res, equal_to(['Simple(a)', 'Simple(b)']))
proto_pipeline, _ = pipeline.to_runner_api(return_context=True)
pipeline_from_proto = Pipeline.from_runner_api(
proto_pipeline, pipeline.runner, pipeline._options)
pipeline_from_proto.run().wait_until_finish()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,580,363,643,732,151,000 | 33.455882 | 74 | 0.725992 | false |
tiredpixel/pikka-bird-collector-py | pikka_bird_collector/collectors/postgresql.py | 1 | 4994 | from pikka_bird_collector.parsers.table import Table as Parser
from .base_port_command import BasePortCommand, Base
class Postgresql(BasePortCommand):
"""
Collector for PostgreSQL (http://www.postgresql.org/).
The collector is enabled whenever non-empty settings are passed.
Multiple instances running on the same box are supported; just specify
each port within settings.
By default, core status and replication status are gathered. Optionally,
settings can be gathered.
For consistency, `username` is called `user`.
DEPENDENCIES:
psql
Available in PATH.
SETTINGS:
minimal:
{
5432: None}
supported:
{
5432: {
'user': "USER",
'collect': {
'stat_replication': False,
'settings': True}}}
"""
COLLECT_SETTING_DEFAULTS = {
'stat_replication': True,
'settings': False}
CMD_STATUS = """
SELECT
inet_client_addr(),
inet_client_port(),
inet_server_addr(),
inet_server_port(),
pg_backend_pid(),
pg_backup_start_time(),
pg_conf_load_time(),
(CASE pg_is_in_backup()
WHEN 'f' THEN pg_current_xlog_insert_location()
END) AS pg_current_xlog_insert_location,
(CASE pg_is_in_backup()
WHEN 'f' THEN pg_current_xlog_location()
END) AS pg_current_xlog_location,
(CASE pg_is_in_backup()
WHEN 't' THEN 'on'
WHEN 'f' THEN 'off'
END) AS pg_is_in_backup,
(CASE pg_is_in_recovery()
WHEN 't' THEN 'on'
WHEN 'f' THEN 'off'
END) AS pg_is_in_recovery,
(CASE pg_is_in_recovery()
WHEN 't' THEN (CASE pg_is_xlog_replay_paused()
WHEN 't' THEN 'on'
WHEN 'f' THEN 'off'
END)
END) AS pg_is_xlog_replay_paused,
pg_last_xact_replay_timestamp(),
pg_last_xlog_receive_location(),
pg_last_xlog_replay_location(),
pg_postmaster_start_time(),
extract(epoch from (now() - pg_postmaster_start_time())) AS uptime_s,
version()
""".replace('\n', ' ')
CMD_SETTINGS = 'SELECT name, setting FROM pg_settings'
CMD_STAT_REPLICATION = 'SELECT * FROM pg_stat_replication'
PARSE_BOOLS = {
'on': True,
'off': False}
@staticmethod
def command_tool(port, settings, command):
settings = settings or {}
c = []
c.extend(['psql',
'--host', '127.0.0.1', # socket not (yet) supported
'--port', port,
'--dbname', 'template1',
'--command', command,
'--no-password',
'--quiet',
'--no-align',
'--pset=footer=off'])
if settings.get('user'):
c.append('--username=%s' % settings['user'])
return c
def collect_port(self, port, settings):
metrics = {}
o = self.command_output(port, settings, self.CMD_STATUS)
parser = Parser(
delim_col='|',
converter_key=Base.parse_str_setting_key,
converter_value=Postgresql.__parse_str_setting_value,
transpose=True)
ms = parser.parse(o)
if len(ms):
metrics['status'] = ms
else:
return metrics # service down; give up
if self.collect_setting('stat_replication', settings):
o = self.command_output(port, settings, self.CMD_STAT_REPLICATION)
parser = Parser(
delim_col='|',
converter_key=Base.parse_str_setting_key,
converter_value=Postgresql.__parse_str_setting_value,
tag_header_col='pid')
ms = parser.parse(o)
if len(ms):
metrics['stat_replication'] = ms
if self.collect_setting('settings', settings):
o = self.command_output(port, settings, self.CMD_SETTINGS)
parser = Parser(
delim_col='|',
converter_key=Base.parse_str_setting_key,
converter_value=Postgresql.__parse_str_setting_value)
ms = parser.parse(o)
if len(ms):
metrics['settings'] = ms
return metrics
@staticmethod
def __parse_str_setting_value(value):
v = Base.parse_str_setting_value(value)
if v in Postgresql.PARSE_BOOLS:
v = Postgresql.PARSE_BOOLS[v]
return v
| mit | -1,248,612,989,321,853,400 | 32.072848 | 80 | 0.489187 | false |