repo_name (string, len 5-92) | path (string, len 4-232) | copies (string, 19 classes) | size (string, len 4-7) | content (string, len 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
danirus/django-comments-xtd | django_comments_xtd/admin.py | 1 | 1729 | from __future__ import unicode_literals
from django.contrib import admin
from django_comments import get_model
from django_comments.admin import CommentsAdmin
from django_comments.models import CommentFlag
from django_comments_xtd.models import XtdComment, BlackListedDomain
class XtdCommentsAdmin(CommentsAdmin):
list_display = ('cid', 'thread_level', 'nested_count', 'name',
'content_type', 'object_pk', 'ip_address', 'submit_date',
'followup', 'is_public', 'is_removed')
list_display_links = ('cid',)
list_filter = ('content_type', 'is_public', 'is_removed', 'followup')
fieldsets = (
(None, {'fields': ('content_type', 'object_pk', 'site')}),
('Content', {'fields': ('user', 'user_name', 'user_email',
'user_url', 'comment', 'followup')}),
('Metadata', {'fields': ('submit_date', 'ip_address',
'is_public', 'is_removed')}),
)
date_hierarchy = 'submit_date'
ordering = ('thread_id', 'order')
search_fields = ['object_pk', 'user__username', 'user_name', 'user_email',
'comment']
def thread_level(self, obj):
rep = '|'
if obj.level:
rep += '-' * obj.level
rep += " c%d to c%d" % (obj.id, obj.parent_id)
else:
rep += " c%d" % obj.id
return rep
def cid(self, obj):
return 'c%d' % obj.id
class BlackListedDomainAdmin(admin.ModelAdmin):
search_fields = ['domain']
if get_model() is XtdComment:
admin.site.register(XtdComment, XtdCommentsAdmin)
admin.site.register(CommentFlag)
admin.site.register(BlackListedDomain, BlackListedDomainAdmin)
| bsd-2-clause | 6,129,600,062,988,580,000 | 34.285714 | 78 | 0.585309 | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/conf/locale/eo/formats.py | 1 | 2387 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j\-\a \d\e F Y' # '26-a de julio 1887'
TIME_FORMAT = 'H:i' # '18:59'
DATETIME_FORMAT = r'j\-\a \d\e F Y\, \j\e H:i' # '26-a de julio 1887, je 18:59'
YEAR_MONTH_FORMAT = r'F \d\e Y' # 'julio de 1887'
MONTH_DAY_FORMAT = r'j\-\a \d\e F' # '26-a de julio'
SHORT_DATE_FORMAT = 'Y-m-d' # '1887-07-26'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i' # '1887-07-26 18:59'
FIRST_DAY_OF_WEEK = 1 # Monday (lundo)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', # '1887-07-26'
'%y-%m-%d', # '87-07-26'
'%Y %m %d', # '1887 07 26'
'%d-a de %b %Y', # '26-a de jul 1887'
'%d %b %Y', # '26 jul 1887'
'%d-a de %B %Y', # '26-a de julio 1887'
'%d %B %Y', # '26 julio 1887'
'%d %m %Y', # '26 07 1887'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '18:59:00'
'%H:%M', # '18:59'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '1887-07-26 18:59:00'
'%Y-%m-%d %H:%M', # '1887-07-26 18:59'
'%Y-%m-%d', # '1887-07-26'
'%Y.%m.%d %H:%M:%S', # '1887.07.26 18:59:00'
'%Y.%m.%d %H:%M', # '1887.07.26 18:59'
'%Y.%m.%d', # '1887.07.26'
'%d/%m/%Y %H:%M:%S', # '26/07/1887 18:59:00'
'%d/%m/%Y %H:%M', # '26/07/1887 18:59'
'%d/%m/%Y', # '26/07/1887'
'%y-%m-%d %H:%M:%S', # '87-07-26 18:59:00'
'%y-%m-%d %H:%M', # '87-07-26 18:59'
'%y-%m-%d', # '87-07-26'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| mit | -3,731,351,361,881,060,400 | 43.903846 | 80 | 0.412652 | false |
marcharper/python-ternary | examples/scatter_colorbar.py | 1 | 4211 | """An example of the colorbar display on the scatter plot."""
import ternary
import matplotlib.pyplot as plt
def _en_to_enth(energy, concs, A, B, C):
"""Converts an energy to an enthalpy.
Converts energy to enthalpy using the following formula:
Enthalpy = energy - (energy contribution from A) - (energy contribution from B) -
(energy contribution from C)
An absolute value is taken afterward for convenience.
Parameters
----------
energy : float
The energy of the structure
concs : list of floats
The concentrations of each element
A : float
The energy of pure A
B : float
The energy of pure B
C : float
The energy of pure C
Returns
-------
enth : float
The enthalpy of formation.
"""
enth = abs(energy - concs[0]*A - concs[1] * B - concs[2] * C)
return enth
def _energy_to_enthalpy(energy):
"""Converts energy to enthalpy.
This function take the energies stored in the energy array and
converts them to formation enthalpy.
    Parameters
    ----------
energy : list of lists of floats
Returns
-------
enthalpy : list of lists containing the enthalpies.
"""
pureA = [energy[0][0], energy[0][1]]
pureB = [energy[1][0], energy[1][1]]
pureC = [energy[2][0], energy[2][1]]
enthalpy = []
for en in energy:
c = en[2]
conc = [float(i) / sum(c) for i in c]
CE = _en_to_enth(en[0], conc, pureA[0], pureB[0], pureC[0])
VASP = _en_to_enth(en[1], conc, pureA[1], pureB[1], pureC[1])
enthalpy.append([CE, VASP, c])
return enthalpy
def _find_error(vals):
"""Find the errors in the energy values.
This function finds the errors in the enthalpys.
Parameters
----------
vals : list of lists of floats
Returns
-------
err_vals : list of lists containing the errors.
"""
err_vals = []
for en in vals:
c = en[2]
conc = [float(i) / sum(c) for i in c]
err = abs(en[0] - en[1])
err_vals.append([conc, err])
return err_vals
def _read_data(fname):
"""Reads data from file.
Reads the data in 'fname' into a list where each list entry contains
[energy predicted, energy calculated, list of concentrations].
Parameters
----------
fname : str
The name and path to the data file.
Returns
-------
energy : list of lists of floats
A list of the energies and the concentrations.
"""
energy = []
with open(fname,'r') as f:
for line in f:
CE = abs(float(line.strip().split()[0]))
VASP = abs(float(line.strip().split()[1]))
conc = [i for i in line.strip().split()[2:]]
conc_f = []
for c in conc:
if '[' in c and ']' in c:
conc_f.append(int(c[1:-1]))
elif '[' in c:
conc_f.append(int(c[1:-1]))
elif ']' in c or ',' in c:
conc_f.append(int(c[:-1]))
else:
conc_f.append(int(c))
energy.append([CE, VASP, conc_f])
return energy
def conc_err_plot(fname):
"""Plots the error in the CE data.
This plots the error in the CE predictions within a ternary concentration diagram.
Parameters
----------
fname : string containing the input file name.
"""
energies = _read_data(fname)
enthalpy = _energy_to_enthalpy(energies)
this_errors = _find_error(enthalpy)
points = []
colors = []
for er in this_errors:
concs = er[0]
points.append((concs[0] * 100, concs[1] * 100, concs[2] * 100))
colors.append(er[1])
scale = 100
figure, tax = ternary.figure(scale=scale)
tax.boundary(linewidth=1.0)
tax.set_title("Errors in Convex Hull Predictions.", fontsize=20)
tax.gridlines(multiple=10, color="blue")
tax.scatter(points, vmax=max(colors), colormap=plt.cm.viridis, colorbar=True, c=colors, cmap=plt.cm.viridis)
tax.show()
if __name__ == "__main__":
conc_err_plot('sample_data/scatter_colorbar.txt')
| mit | 334,574,868,188,572,740 | 24.521212 | 112 | 0.560912 | false |
ITCase/ps_gallery | pyramid_sacrud_gallery/tests/test_models.py | 1 | 3733 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Petr Zelenin ([email protected])
#
# Distributed under terms of the MIT license.
import hashlib
import os
import unittest
import transaction
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from zope.sqlalchemy import ZopeTransactionExtension
from pyramid_sacrud.exceptions import SacrudMessagedException
from . import (
add_fixture,
Base,
Gallery, GalleryItem, GalleryItemM2M,
TEST_DATABASE_CONNECTION_STRING,
)
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
def add_data(session):
galleries = [
{'pk': 1, 'name': 'Best gallery',
'description': 'Full description of gallery'},
{'pk': 2, 'name': 'Another best gallery',
'description': 'Another full description of gallery'},
]
add_fixture(Gallery, galleries, session)
items = []
gallery_items_m2m = []
for gallery in galleries:
for x in xrange(1, 10):
image = '{name}-{salt}.jpg'.format(name=x, salt=gallery['pk'])
image_abspath = GalleryItem.get_upload_path()
image_hash_base = os.path.join(image_abspath, image)
image_hash = hashlib.md5(image_hash_base).hexdigest()
items.append({
'image': image,
'description': 'This is image with hash "%s"' % image_hash
})
gallery_items_m2m.append({
'gallery_id': gallery['pk'],
'item_id': image_hash,
})
add_fixture(GalleryItem, items, session)
add_fixture(GalleryItemM2M, gallery_items_m2m, session)
class TestGallery(unittest.TestCase):
def setUp(self):
engine = create_engine(TEST_DATABASE_CONNECTION_STRING)
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
add_data(DBSession)
def tearDown(self):
DBSession.remove()
def test_mixins_attrs(self):
"""Check mixins attrs auto apply to classes."""
self.assertEqual(Gallery.get_pk(), 'pk')
self.assertEqual(Gallery.get_db_pk(), 'id')
self.assertEqual(Gallery.__tablename__, 'gallery')
self.assertEqual(GalleryItem.get_pk(), 'pk')
self.assertEqual(GalleryItem.get_db_pk(), 'id')
self.assertEqual(GalleryItem.__tablename__, 'galleryitem')
self.assertEqual(GalleryItemM2M.__tablename__, 'galleryitemm2m')
def test_instances_attrs(self):
"""Check attrs and methods available only for instances."""
gallery = DBSession.query(Gallery).first()
self.assertEqual(gallery.__repr__(), gallery.name)
self.assertEqual(gallery.get_val_pk(), 1)
image = DBSession.query(GalleryItem).filter(GalleryItem.pk == 1).one()
self.assertIn(image.image_hash, image.__repr__())
def test_mixins_fks(self):
"""Check GalleryItemM2MMixin has ForeignKeys to GalleryMixin
and GalleryItemMixin."""
self.assertTrue(hasattr(GalleryItemM2M, 'gallery_id'))
self.assertTrue(hasattr(GalleryItemM2M, 'item_id'))
def test_access_by_relations(self):
"""Check relations between GalleryMixin and GalleryItemMixin."""
gallery = DBSession.query(Gallery).first()
self.assertEqual(len(gallery.items), 9)
def test_unique_image_hash(self):
"""Check of deny to add non-unique image_hash."""
image = GalleryItem(image='1-1.jpg')
DBSession.add(image)
with self.assertRaises(SacrudMessagedException) as cm:
DBSession.query(GalleryItem).all()
self.assertIn('This image was uploaded earlier.', str(cm.exception))
| mit | -1,741,868,866,605,887,200 | 33.238532 | 78 | 0.643891 | false |
siavashk/pycpd | testing/affine_test.py | 1 | 1194 | import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from pycpd import AffineRegistration
def test_2D():
B = np.array([[1.0, 0.5], [0, 1.0]])
t = np.array([0.5, 1.0])
Y = np.loadtxt('data/fish_target.txt')
X = np.dot(Y, B) + np.tile(t, (np.shape(Y)[0], 1))
reg = AffineRegistration(**{'X': X, 'Y': Y})
TY, (B_reg, t_reg) = reg.register()
assert_array_almost_equal(B, B_reg)
assert_array_almost_equal(t, t_reg)
assert_array_almost_equal(X, TY)
def test_3D():
B = np.array([[1.0, 0.5, 0.0], [0, 1.0, 0.0], [0.0, 0.0, 1.0]])
t = np.array([0.5, 1.0, -2.0])
fish_target = np.loadtxt('data/fish_target.txt')
Y1 = np.zeros((fish_target.shape[0], fish_target.shape[1] + 1))
Y1[:, :-1] = fish_target
Y2 = np.ones((fish_target.shape[0], fish_target.shape[1] + 1))
Y2[:, :-1] = fish_target
Y = np.vstack((Y1, Y2))
X = np.dot(Y, B) + np.tile(t, (np.shape(Y)[0], 1))
reg = AffineRegistration(**{'X': X, 'Y': Y})
TY, (B_reg, t_reg) = reg.register()
assert_array_almost_equal(B, B_reg)
assert_array_almost_equal(t, t_reg)
assert_array_almost_equal(X, TY)
| mit | 1,301,905,175,649,250,300 | 30.421053 | 72 | 0.577889 | false |
tomprince/gemrb | gemrb/GUIScripts/Spellbook.py | 1 | 16543 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2011 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# a library of any functions for spell(book) managment
import GemRB
import CommonTables
from GUIDefines import *
from ie_stats import *
from ie_action import ACT_LEFT, ACT_RIGHT
from ie_spells import SP_IDENTIFY, SP_SURGE, LSR_KNOWN, LSR_LEVEL, LSR_STAT
from ie_restype import RES_2DA
#################################################################
# this is in the operator module of the standard python lib
def itemgetter(*items):
if len(items) == 1:
item = items[0]
def g(obj):
return obj[item]
else:
def g(obj):
return tuple(obj[item] for item in items)
return g
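# Example: itemgetter('SpellType', 'SpellLevel') returns a callable g such
# that g(spell) == (spell['SpellType'], spell['SpellLevel']), matching
# operator.itemgetter from the standard library.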
#################################################################
# routines for the actionbar spell access code
def GetUsableMemorizedSpells(actor, BookType):
memorizedSpells = []
spellResRefs = []
for level in range (9):
spellCount = GemRB.GetMemorizedSpellsCount (actor, BookType, level, False)
for i in range (spellCount):
Spell0 = GemRB.GetMemorizedSpell (actor, BookType, level, i)
if not Spell0["Flags"]:
# depleted, so skip
continue
if Spell0["SpellResRef"] in spellResRefs:
# add another one, so we can get the count more cheaply later
spellResRefs.append (Spell0["SpellResRef"])
continue
spellResRefs.append (Spell0["SpellResRef"])
Spell = GemRB.GetSpell(Spell0["SpellResRef"])
Spell['BookType'] = BookType # just another sorting key
Spell['SpellIndex'] = GemRB.GetSpelldataIndex (actor, Spell["SpellResRef"], 1<<BookType) # crucial!
if Spell['SpellIndex'] == -1:
print "Error, memorized spell not found!", Spell["SpellResRef"]
Spell['SpellIndex'] += 1000 * 1<<BookType
memorizedSpells.append (Spell)
if not len(memorizedSpells):
return []
# count and remove the duplicates
memorizedSpells2 = []
for spell in memorizedSpells:
if spell["SpellResRef"] in spellResRefs:
spell['MemoCount'] = spellResRefs.count(spell["SpellResRef"])
while spell["SpellResRef"] in spellResRefs:
spellResRefs.remove(spell["SpellResRef"])
memorizedSpells2.append(spell)
return memorizedSpells2
def GetKnownSpells(actor, BookType):
knownSpells = []
spellResRefs = []
for level in range (9):
spellCount = GemRB.GetKnownSpellsCount (actor, BookType, level)
for i in range (spellCount):
Spell0 = GemRB.GetKnownSpell (actor, BookType, level, i)
if Spell0["SpellResRef"] in spellResRefs:
continue
spellResRefs.append (Spell0["SpellResRef"])
Spell = GemRB.GetSpell(Spell0["SpellResRef"])
Spell['BookType'] = BookType # just another sorting key
Spell['MemoCount'] = 0
Spell['SpellIndex'] = 1000 * 1<<BookType # this gets assigned properly later
knownSpells.append (Spell)
return knownSpells
def SortUsableSpells(memorizedSpells):
# sort it by using the spldisp.2da table
layout = CommonTables.SpellDisplay.GetValue ("USE_ROW", "ROWS")
layout = CommonTables.SpellDisplay.GetRowName (layout)
key1 = CommonTables.SpellDisplay.GetValue (layout, "KEY1")
key2 = CommonTables.SpellDisplay.GetValue (layout, "KEY2")
key3 = CommonTables.SpellDisplay.GetValue (layout, "KEY3")
if key1:
if key3 and key2:
memorizedSpells = sorted(memorizedSpells, key=itemgetter(key1, key2, key3))
elif key2:
memorizedSpells = sorted(memorizedSpells, key=itemgetter(key1, key2))
else:
memorizedSpells = sorted(memorizedSpells, key=itemgetter(key1))
return memorizedSpells
# Sets up all the (12) action buttons for a player character with different spell or innate icons.
# It also sets up the scroll buttons left and right if needed.
# If Start is supplied, it will skip the first few items (used when scrolling through the list)
# BookType is a spellbook type bitfield (1-mage, 2-priest, 4-innate)
# FIXME: iwd2 has even more types
# Offset is a control ID offset here for iwd2 purposes
def SetupSpellIcons(Window, BookType, Start=0, Offset=0):
actor = GemRB.GameGetFirstSelectedActor ()
# construct the spellbook of usable (not depleted) memorized spells
# the getters expect the BookType as: 0 priest, 1 mage, 2 innate
# we almost need bitfield support for cleric/mages and the like
if BookType == -1:
# Nahal's reckless dweomer can use any known spell
allSpells = GetKnownSpells (actor, IE_SPELL_TYPE_WIZARD)
else:
allSpells = []
if BookType & (1<<IE_SPELL_TYPE_PRIEST): #1
allSpells = GetUsableMemorizedSpells (actor, IE_SPELL_TYPE_PRIEST)
if BookType & (1<<IE_SPELL_TYPE_WIZARD): #2
allSpells += GetUsableMemorizedSpells (actor, IE_SPELL_TYPE_WIZARD)
if BookType & (1<<IE_SPELL_TYPE_INNATE): #4
allSpells += GetUsableMemorizedSpells (actor, IE_SPELL_TYPE_INNATE)
if not len(allSpells):
		raise AttributeError ("Error, unknown BookType passed to SetupSpellIcons: %d! Bailing out!" %(BookType))
if BookType == -1:
memorizedSpells = allSpells
# reset Type, so we can choose the surgy spell instead of just getting a redraw of the action bar
GemRB.SetVar("Type", 3)
else:
memorizedSpells = SortUsableSpells(allSpells)
# start creating the controls
import GUICommonWindows
# TODO: ASCOL, ROWS
#AsCol = CommonTables.SpellDisplay.GetValue (layout, "AS_COL")
#Rows = CommonTables.SpellDisplay.GetValue (layout, "ROWS")
More = len(memorizedSpells) > 12
if not More and Start:
More = True
# scroll left button
if More:
Button = Window.GetControl (Offset)
Button.SetText ("")
if Start:
#Button.SetActionIcon(globals(), ACT_LEFT, 0)
GUICommonWindows.SetActionIconWorkaround (Button, ACT_LEFT, 0)
Button.SetState (IE_GUI_BUTTON_UNPRESSED)
else:
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetTooltip ("")
Button.SetState (IE_GUI_BUTTON_DISABLED)
# disable all spells if fx_disable_spellcasting was run with the same type
# but only if there are any spells of that type to disable
disabled_spellcasting = GemRB.GetPlayerStat(actor, IE_CASTING, 0)
actionLevel = GemRB.GetVar ("ActionLevel")
#order is: mage, cleric, innate, class, song, (defaults to 1, item)
spellSections = [2, 4, 8, 16, 16]
# create the spell icon buttons
buttonCount = 12 - More # GUIBT_COUNT in PCStatsStruct
for i in range (buttonCount):
Button = Window.GetControl (i+Offset+More)
if i+Start >= len(memorizedSpells):
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetText ("")
Button.SetTooltip ("")
continue
Spell = memorizedSpells[i+Start]
spellType = Spell['SpellType']
if spellType > 4:
spellType = 1
else:
spellType = spellSections[spellType]
if BookType == -1:
Button.SetVarAssoc ("Spell", Spell['SpellIndex']+i+Start)
else:
Button.SetVarAssoc ("Spell", Spell['SpellIndex'])
# disable spells that should be cast from the inventory or can't be cast while silenced or ...
# see splspec.2da for all the reasons; silence is handled elsewhere
specialSpell = GemRB.CheckSpecialSpell(actor, Spell['SpellResRef'])
specialSpell = (specialSpell & SP_IDENTIFY) or ((specialSpell & SP_SURGE) and actionLevel == 5)
if specialSpell or (disabled_spellcasting&spellType):
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button.EnableBorder(1, 0)
#Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommonWindows.UpdateActionsWindow) # noop if it worked or not :)
else:
Button.SetState (IE_GUI_BUTTON_UNPRESSED)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommonWindows.SpellPressed)
if Spell['SpellResRef']:
Button.SetSprites ("guibtbut", 0, 0,1,2,3)
Button.SetSpellIcon (Spell['SpellResRef'], 1)
Button.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_ALIGN_BOTTOM|IE_GUI_BUTTON_ALIGN_RIGHT, OP_SET)
Button.SetTooltip (Spell['SpellName'])
if Spell['MemoCount'] > 0 and BookType != -1:
Button.SetText (str(Spell['MemoCount']))
else:
Button.SetText ("")
# scroll right button
if More:
Button = Window.GetControl (Offset+buttonCount)
GUICommonWindows.SetActionIconWorkaround (Button, ACT_RIGHT, buttonCount)
Button.SetText ("")
if len(memorizedSpells) - Start > 10:
Button.SetState (IE_GUI_BUTTON_UNPRESSED)
else:
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetTooltip ("")
#################################################################
# routines used during character generation and levelup
#################################################################
def GetMageSpells (Kit, Alignment, Level):
MageSpells = []
SpellType = 99
Table = GemRB.LoadTable ("aligns")
v = Table.FindValue (3, Alignment)
Usability = Kit | Table.GetValue(v, 5)
SpellsTable = GemRB.LoadTable ("spells")
for i in range(SpellsTable.GetValue ("MAGE", str(Level), 1) ):
SpellName = "SPWI%d%02d"%(Level,i+1)
ms = GemRB.GetSpell (SpellName, 1)
if ms == None:
continue
if Usability & ms['SpellExclusion']:
SpellType = 0
else:
SpellType = 1
if Kit & (1 << ms['SpellSchool']+5): # of matching specialist school
SpellType = 2
# Wild mage spells are of normal schools, so we have to find them
# separately. Generalists can learn any spell but the wild ones, so
# we check if the mage is wild and if a generalist wouldn't be able
# to learn the spell.
if Kit == 0x8000 and (0x4000 & ms['SpellExclusion']):
SpellType = 2
MageSpells.append ([SpellName, SpellType])
return MageSpells
def GetLearnableMageSpells (Kit, Alignment, Level):
Learnable = []
for Spell in GetMageSpells (Kit, Alignment, Level):
if Spell[1]:
Learnable.append (Spell[0])
return Learnable
def GetLearnablePriestSpells (Class, Alignment, Level):
Learnable =[]
Table=GemRB.LoadTable("aligns")
v = Table.FindValue(3, Alignment)
#usability is the bitset we look for
Usability=Table.GetValue(v, 5)
SpellsTable = GemRB.LoadTable ("spells")
for i in range(SpellsTable.GetValue ("PRIEST", str (Level), 1) ):
SpellName = "SPPR%d%02d"%(Level,i+1)
ms = GemRB.GetSpell(SpellName, 1)
if ms == None:
continue
if Class & ms['SpellDivine']:
continue
if Usability & ms['SpellExclusion']:
continue
Learnable.append (SpellName)
return Learnable
# there is no separate druid spell table in the originals
#FIXME: try to do this in a non-hard way?
def GetPriestSpellTable(tablename):
if not GemRB.HasResource (tablename, RES_2DA):
if tablename == "MXSPLDRU":
return "MXSPLPRS"
return tablename
def SetupSpellLevels (pc, TableName, Type, Level):
#don't die on a missing reference
tmp = GetPriestSpellTable(TableName)
if tmp != TableName:
SetupSpellLevels (pc, tmp, Type, Level)
return
Table = GemRB.LoadTable (TableName)
for i in range(Table.GetColumnCount ()):
# do a string lookup since some tables don't have entries for all levels
value = Table.GetValue (str(Level), str(i+1), 1)
# specialist mages get an extra spell if they already know that level
# FIXME: get a general routine to find specialists
school = GemRB.GetVar("MAGESCHOOL")
if Type == IE_SPELL_TYPE_WIZARD and school != 0:
if value > 0:
value += 1
GemRB.SetMemorizableSpellsCount (pc, value, Type, i)
return
def UnsetupSpellLevels (pc, TableName, Type, Level):
#don't die on a missing reference
tmp = GetPriestSpellTable(TableName)
if tmp != TableName:
UnsetupSpellLevels (pc, tmp, Type, Level)
return
Table = GemRB.LoadTable (TableName)
for i in range(Table.GetColumnCount ()):
GemRB.SetMemorizableSpellsCount (pc, 0, Type, i)
return
# Returns -1 if not found; otherwise, the index of the spell
def HasSpell (Actor, SpellType, Level, Ref):
# loop through each spell in the spell level and check for a matching ref
for i in range (GemRB.GetKnownSpellsCount (Actor, SpellType, Level)):
Spell = GemRB.GetKnownSpell(Actor, SpellType, Level, i)
if Spell["SpellResRef"].upper() == Ref.upper(): # ensure case is the same
return i
# not found
return -1
def CannotLearnSlotSpell ():
pc = GemRB.GameGetSelectedPCSingle ()
	# disqualify sorcerers immediately
if GemRB.GetPlayerStat (pc, IE_CLASS) == 19:
return LSR_STAT
import GUICommon
if GUICommon.GameIsPST():
import GUIINV
slot, slot_item = GUIINV.ItemHash[GemRB.GetVar ('ItemButton')]
else:
slot_item = GemRB.GetSlotItem (pc, GemRB.GetVar ("ItemButton"))
spell_ref = GemRB.GetItem (slot_item['ItemResRef'], pc)['Spell']
spell = GemRB.GetSpell (spell_ref)
# maybe she already knows this spell
if HasSpell (pc, IE_SPELL_TYPE_WIZARD, spell['SpellLevel']-1, spell_ref) != -1:
return LSR_KNOWN
# level check (needs enough intelligence for this level of spell)
dumbness = GemRB.GetPlayerStat (pc, IE_INT)
if spell['SpellLevel'] > GemRB.GetAbilityBonus (IE_INT, 1, dumbness):
return LSR_LEVEL
return 0
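# Summary of CannotLearnSlotSpell return values (inferred from the code above):
# 0 means the scroll can be learned; LSR_STAT flags an ineligible class
# (sorcerer), LSR_KNOWN an already-known spell, and LSR_LEVEL insufficient
# intelligence for the spell's level.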
def LearnPriestSpells (pc, level, mask):
"""Learns all the priest spells through the given spell level.
Mask distinguishes clerical and druidic spells."""
if level > 7: # make sure we don't have too high a level
level = 7
# go through each level
alignment = GemRB.GetPlayerStat (pc, IE_ALIGNMENT)
for i in range (level):
learnable = GetLearnablePriestSpells (mask, alignment, i+1)
for spell in learnable:
# if the spell isn't learned, learn it
if HasSpell (pc, IE_SPELL_TYPE_PRIEST, i, spell) < 0:
GemRB.LearnSpell (pc, spell)
return
def RemoveKnownSpells (pc, type, level1=1, level2=1, noslots=0, kit=0):
"""Removes all known spells of a given type between two spell levels.
If noslots is true, all memorization counts are set to 0.
Kit is used to identify the priest spell mask of the spells to be removed;
this is only used when removing spells in a dualclass."""
# choose the correct limit based upon class type
if type == IE_SPELL_TYPE_WIZARD:
limit = 9
elif type == IE_SPELL_TYPE_PRIEST:
limit = 7
# make sure that we get the original kit, if we have one
if kit:
		import GUICommon
		originalkit = GUICommon.GetKitIndex (pc)
if originalkit: # kitted; find the class value
originalkit = CommonTables.KitList.GetValue (originalkit, 7)
else: # just get the class value
originalkit = GemRB.GetPlayerStat (pc, IE_CLASS)
			# this is specifically for dual-classes and will not work to remove only one
# spell type from a ranger/cleric multi-class
if CommonTables.ClassSkills.GetValue (originalkit, 0, 0) != "*": # knows druid spells
originalkit = 0x8000
elif CommonTables.ClassSkills.GetValue (originalkit, 1, 0) != "*": # knows cleric spells
originalkit = 0x4000
else: # don't know any other spells
originalkit = 0
# don't know how this would happen, but better to be safe
if originalkit == kit:
originalkit = 0
elif type == IE_SPELL_TYPE_INNATE:
limit = 1
else: # can't do anything if an improper spell type is sent
return 0
# make sure we're within parameters
if level1 < 1 or level2 > limit or level1 > level2:
return 0
# remove all spells for each level
for level in range (level1-1, level2):
# we need the count because we remove each spell in reverse order
count = GemRB.GetKnownSpellsCount (pc, type, level)
mod = count-1
for spell in range (count):
# see if we need to check for kit
if type == IE_SPELL_TYPE_PRIEST and kit:
# get the spell's ref data
ref = GemRB.GetKnownSpell (pc, type, level, mod-spell)
ref = GemRB.GetSpell (ref['SpellResRef'], 1)
# we have to look at the originalkit as well specifically for ranger/cleric dual-classes
# we wouldn't want to remove all cleric spells and druid spells if we lost our cleric class
# only the cleric ones
if kit&ref['SpellDivine'] or (originalkit and not originalkit&ref['SpellDivine']):
continue
# remove the spell
GemRB.RemoveSpell (pc, type, level, mod-spell)
# remove memorization counts if desired
if noslots:
GemRB.SetMemorizableSpellsCount (pc, 0, type, level)
# return success
return 1
| gpl-2.0 | -1,405,279,672,849,003,800 | 34.423983 | 112 | 0.708457 | false |
hkernbach/arangodb | 3rdParty/V8/v5.7.492.77/src/js/macros.py | 1 | 9259 | # Copyright 2006-2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Dictionary that is passed as defines for js2c.py.
# Used for defines that must be defined for all native JS files.
define NONE = 0;
define READ_ONLY = 1;
define DONT_ENUM = 2;
define DONT_DELETE = 4;
# 2^53 - 1
define kMaxSafeInteger = 9007199254740991;
# 2^32 - 1
define kMaxUint32 = 4294967295;
# Type query macros.
#
# Note: We have special support for typeof(foo) === 'bar' in the compiler.
# It will *not* generate a runtime typeof call for the most important
# values of 'bar'.
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
macro IS_DATE(arg) = (%IsDate(arg));
macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_FUNCTION(arg) = (%IsFunction(arg));
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_PROXY(arg) = (%_IsJSProxy(arg));
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_SIMD_VALUE(arg) = (%IsSimdValue(arg));
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_TYPEDARRAY(arg) = (%_IsTypedArray(arg));
macro IS_UNDEFINED(arg) = (arg === (void 0));
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
# Macro for ES queries of the type: "Type(O) is Object."
macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg));
# Macro for ES queries of the type: "IsCallable(O)"
macro IS_CALLABLE(arg) = (typeof(arg) === 'function');
# Macro for ES6 CheckObjectCoercible
# Will throw a TypeError of the form "[functionName] called on null or undefined".
macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw %make_type_error(kCalledOnNullOrUndefined, functionName);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_BOOLEAN(arg) = (!!(arg));
macro TO_INTEGER(arg) = (%_ToInteger(arg));
macro TO_INT32(arg) = ((arg) | 0);
macro TO_UINT32(arg) = ((arg) >>> 0);
macro INVERT_NEG_ZERO(arg) = ((arg) + 0);
macro TO_LENGTH(arg) = (%_ToLength(arg));
macro TO_STRING(arg) = (%_ToString(arg));
macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
# Private names.
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
macro HAS_PRIVATE(obj, key) = HAS_OWN_PROPERTY(obj, key);
macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
# To avoid ES2015 Function name inference.
macro ANONYMOUS_FUNCTION(fn) = (0, (fn));
# Constants. The compiler constant folds them.
define INFINITY = (1/0);
define UNDEFINED = (void 0);
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
# For messages.js
# Matches Script::Type from objects.h
define TYPE_NATIVE = 0;
define TYPE_EXTENSION = 1;
define TYPE_NORMAL = 2;
# Matches Script::CompilationType from objects.h
define COMPILATION_TYPE_HOST = 0;
define COMPILATION_TYPE_EVAL = 1;
define COMPILATION_TYPE_JSON = 2;
# Must match PropertyFilter in property-details.h
define PROPERTY_FILTER_NONE = 0;
define PROPERTY_FILTER_ONLY_ENUMERABLE = 2;
define PROPERTY_FILTER_SKIP_STRINGS = 8;
define PROPERTY_FILTER_SKIP_SYMBOLS = 16;
# Use for keys, values and entries iterators.
define ITERATOR_KIND_KEYS = 1;
define ITERATOR_KIND_VALUES = 2;
define ITERATOR_KIND_ENTRIES = 3;
macro FIXED_ARRAY_GET(array, index) = (%_FixedArrayGet(array, (index) | 0));
macro FIXED_ARRAY_SET(array, index, value) = (%_FixedArraySet(array, (index) | 0, value));
# TODO(adamk): Find a more robust way to force Smi representation.
macro FIXED_ARRAY_SET_SMI(array, index, value) = (FIXED_ARRAY_SET(array, index, (value) | 0));
macro ORDERED_HASH_TABLE_BUCKET_COUNT(table) = (FIXED_ARRAY_GET(table, 0));
macro ORDERED_HASH_TABLE_ELEMENT_COUNT(table) = (FIXED_ARRAY_GET(table, 1));
macro ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 1, count));
macro ORDERED_HASH_TABLE_DELETED_COUNT(table) = (FIXED_ARRAY_GET(table, 2));
macro ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 2, count));
macro ORDERED_HASH_TABLE_BUCKET_AT(table, bucket) = (FIXED_ARRAY_GET(table, 3 + (bucket)));
macro ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry) = (FIXED_ARRAY_SET(table, 3 + (bucket), entry));
macro ORDERED_HASH_TABLE_HASH_TO_BUCKET(hash, numBuckets) = (hash & ((numBuckets) - 1));
macro ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets) = (3 + (numBuckets) + ((entry) << 1));
macro ORDERED_HASH_SET_KEY_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets)));
macro ORDERED_HASH_SET_CHAIN_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets) + 1));
macro ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) = (3 + (numBuckets) + ((entry) * 3));
macro ORDERED_HASH_MAP_KEY_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets)));
macro ORDERED_HASH_MAP_VALUE_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) + 1));
macro ORDERED_HASH_MAP_CHAIN_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) + 2));
# Must match OrderedHashTable::kNotFound.
define NOT_FOUND = -1;
# Check whether debug is active.
define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
# UseCounters from include/v8.h
define kUseAsm = 0;
define kBreakIterator = 1;
define kLegacyConst = 2;
define kMarkDequeOverflow = 3;
define kStoreBufferOverflow = 4;
define kSlotsBufferOverflow = 5;
define kForcedGC = 7;
define kSloppyMode = 8;
define kStrictMode = 9;
define kRegExpPrototypeStickyGetter = 11;
define kRegExpPrototypeToString = 12;
define kRegExpPrototypeUnicodeGetter = 13;
define kIntlV8Parse = 14;
define kIntlPattern = 15;
define kIntlResolved = 16;
define kPromiseChain = 17;
define kPromiseAccept = 18;
define kPromiseDefer = 19;
define kHtmlCommentInExternalScript = 20;
define kHtmlComment = 21;
define kSloppyModeBlockScopedFunctionRedefinition = 22;
define kForInInitializer = 23;
define kArrayProtectorDirtied = 24;
define kArraySpeciesModified = 25;
define kArrayPrototypeConstructorModified = 26;
define kArrayInstanceProtoModified = 27;
define kArrayInstanceConstructorModified = 28;
define kLegacyFunctionDeclaration = 29;
define kRegExpPrototypeSourceGetter = 30;
define kRegExpPrototypeOldFlagGetter = 31;
| apache-2.0 | -6,143,239,776,302,362,000 | 45.762626 | 161 | 0.696836 | false |
enthought/traitsbackendqt | enthought/traits/ui/qt4/image_enum_editor.py | 1 | 12386 | #------------------------------------------------------------------------------
#
# Copyright (c) 2009, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: Evan Patterson
# Date: 08/11/2009
#
#------------------------------------------------------------------------------
""" Defines the various image enumeration editors for the PyQt user interface
toolkit.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from enthought.qt import QtCore, QtGui
# FIXME: ToolkitEditorFactory is a proxy class defined here just for backward
# compatibility. The class has been moved to the
# enthought.traits.ui.editors.image_enum_editor file.
from enthought.traits.ui.editors.image_enum_editor import ToolkitEditorFactory
from editor import Editor
from enum_editor import BaseEditor as BaseEnumEditor
from enum_editor import SimpleEditor as SimpleEnumEditor
from enum_editor import RadioEditor as CustomEnumEditor
from helper import pixmap_cache
#-------------------------------------------------------------------------------
# 'BaseImageEnumEditor' class:
#-------------------------------------------------------------------------------
class BaseEditor(object):
""" The base class for the different styles of ImageEnumEditor.
"""
def get_pixmap(self, name):
""" Get a pixmap representing a possible object traits value.
"""
factory = self.factory
name = ''.join((factory.prefix, name, factory.suffix))
return pixmap_cache(name, factory._image_path)
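# Example of the lookup above (hypothetical factory values): with
# factory.prefix = 'arrow_' and factory.suffix = '_large', a trait value of
# 'up' is resolved to the image name 'arrow_up_large', which is then loaded
# from factory._image_path through the pixmap cache.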
#-------------------------------------------------------------------------------
# 'ReadonlyEditor' class:
#-------------------------------------------------------------------------------
class ReadonlyEditor(BaseEditor, BaseEnumEditor):
""" Read-only style of image enumeration editor, which displays a single
static image representing the object trait's value.
"""
#---------------------------------------------------------------------------
# Finishes initializing the editor by creating the underlying toolkit
# widget:
#---------------------------------------------------------------------------
def init(self, parent):
""" Finishes initializing the editor by creating the underlying toolkit
widget.
"""
self.control = QtGui.QLabel()
self.control.setPixmap(self.get_pixmap(self.str_value))
self.set_tooltip()
#---------------------------------------------------------------------------
# Updates the editor when the object trait changes external to the editor:
#---------------------------------------------------------------------------
def update_editor(self):
""" Updates the editor when the object trait changes externally to the
editor.
"""
self.control.setPixmap(self.get_pixmap(self.str_value))
#-------------------------------------------------------------------------------
# 'SimpleEditor' class:
#-------------------------------------------------------------------------------
class SimpleEditor(BaseEditor, SimpleEnumEditor):
""" Simple style of image enumeration editor, which displays a combo box.
"""
#---------------------------------------------------------------------------
# Returns the QComboBox used for the editor control:
#---------------------------------------------------------------------------
def create_combo_box(self):
""" Returns the QComboBox used for the editor control.
"""
control = ImageEnumComboBox(self)
control.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Maximum)
return control
#---------------------------------------------------------------------------
# Updates the editor when the object trait changes external to the editor:
#---------------------------------------------------------------------------
def update_editor ( self ):
""" Updates the editor when the object trait changes externally to the
editor.
"""
if self._no_enum_update == 0:
self._no_enum_update += 1
try:
index = self.names.index(self.inverse_mapping[self.value])
except:
self.control.setCurrentIndex(-1)
else:
cols = self.factory.cols
rows = (len(self.names) + cols - 1) / cols
row, col = index / cols, index % cols
self.control.setModelColumn(col)
self.control.setCurrentIndex(row)
self._no_enum_update -= 1
#---------------------------------------------------------------------------
# Rebuilds the contents of the editor whenever the original factory
# object's 'values' trait changes:
#---------------------------------------------------------------------------
def rebuild_editor(self):
""" Rebuilds the contents of the editor whenever the original factory
object's **values** trait changes.
"""
self.control.model().reset()
#-------------------------------------------------------------------------------
# 'CustomEditor' class:
#-------------------------------------------------------------------------------
class CustomEditor(BaseEditor, CustomEnumEditor):
""" Simple style of image enumeration editor, which displays a combo box.
"""
# Is the button layout row-major or column-major? This value overrides the
# default.
row_major = True
#---------------------------------------------------------------------------
# Returns the QAbstractButton used for the radio button:
#---------------------------------------------------------------------------
def create_button(self, name):
""" Returns the QAbstractButton used for the radio button.
"""
button = QtGui.QToolButton()
button.setAutoExclusive(True)
button.setCheckable(True)
pixmap = self.get_pixmap(name)
button.setIcon(QtGui.QIcon(pixmap))
button.setIconSize(pixmap.size())
return button
#-------------------------------------------------------------------------------
# Custom Qt objects used in the SimpleEditor:
#-------------------------------------------------------------------------------
class ImageEnumComboBox(QtGui.QComboBox):
""" A combo box which displays images instead of text.
"""
def __init__(self, editor, parent=None):
""" Reimplemented to store the editor and set a delegate for drawing the
items in the popup menu. If there is more than one column, use a
TableView instead of ListView for the popup.
"""
QtGui.QComboBox.__init__(self, parent)
self._editor = editor
model = ImageEnumModel(editor, self)
self.setModel(model)
delegate = ImageEnumItemDelegate(editor, self)
if editor.factory.cols > 1:
view = ImageEnumTablePopupView(self)
view.setItemDelegate(delegate)
self.setView(view)
# Unless we force it, the popup for a combo box will not be wider
# than the box itself, so we set a high minimum width.
width = 0
for col in xrange(self._editor.factory.cols):
width += view.sizeHintForColumn(col)
view.setMinimumWidth(width)
else:
self.setItemDelegate(delegate)
def paintEvent(self, event):
""" Reimplemented to draw the ComboBox frame and paint the image
centered in it.
"""
painter = QtGui.QStylePainter(self)
painter.setPen(self.palette().color(QtGui.QPalette.Text))
option = QtGui.QStyleOptionComboBox()
self.initStyleOption(option)
painter.drawComplexControl(QtGui.QStyle.CC_ComboBox, option)
editor = self._editor
pixmap = editor.get_pixmap(editor.inverse_mapping[editor.value])
arrow = self.style().subControlRect(QtGui.QStyle.CC_ComboBox, option,
QtGui.QStyle.SC_ComboBoxArrow)
option.rect.setWidth(option.rect.width() - arrow.width())
target = QtGui.QStyle.alignedRect(QtCore.Qt.LeftToRight,
QtCore.Qt.AlignCenter,
pixmap.size(), option.rect)
painter.drawPixmap(target, pixmap)
def sizeHint(self):
""" Reimplemented to set a size hint based on the size of the larget
image.
"""
size = QtCore.QSize()
for name in self._editor.names:
size = size.expandedTo(self._editor.get_pixmap(name).size())
option = QtGui.QStyleOptionComboBox()
self.initStyleOption(option)
size = self.style().sizeFromContents(QtGui.QStyle.CT_ComboBox, option,
size, self)
return size
class ImageEnumTablePopupView(QtGui.QTableView):
def __init__(self, parent):
""" Configure the appearence of the table view.
"""
QtGui.QTableView.__init__(self, parent)
hheader = self.horizontalHeader()
hheader.setResizeMode(QtGui.QHeaderView.ResizeToContents)
hheader.hide()
vheader = self.verticalHeader()
vheader.setResizeMode(QtGui.QHeaderView.ResizeToContents)
vheader.hide()
self.setShowGrid(False)
class ImageEnumItemDelegate(QtGui.QStyledItemDelegate):
""" An item delegate which draws only images.
"""
def __init__(self, editor, parent):
""" Reimplemented to store the editor.
"""
QtGui.QStyledItemDelegate.__init__(self, parent)
self._editor = editor
def displayText(self, value, locale):
""" Reimplemented to display nothing.
"""
return ''
def paint(self, painter, option, mi):
""" Reimplemented to draw images.
"""
# Delegate to our superclass to draw the background
QtGui.QStyledItemDelegate.paint(self, painter, option, mi)
# Now draw the pixmap
name = mi.data(QtCore.Qt.DisplayRole)
pixmap = self._get_pixmap(name)
if pixmap is not None:
target = QtGui.QStyle.alignedRect(QtCore.Qt.LeftToRight,
QtCore.Qt.AlignCenter,
pixmap.size(), option.rect)
painter.drawPixmap(target, pixmap)
def sizeHint(self, option, mi):
""" Reimplemented to define a size hint based on the size of the pixmap.
"""
name = mi.data(QtCore.Qt.DisplayRole)
pixmap = self._get_pixmap(name)
if pixmap is None:
return QtCore.QSize()
return pixmap.size()
def _get_pixmap(self, name):
return self._editor.get_pixmap(name)
class ImageEnumModel(QtCore.QAbstractTableModel):
""" A table model for use with the 'simple' style ImageEnumEditor.
"""
def __init__(self, editor, parent):
""" Reimplemented to store the editor.
"""
super(ImageEnumModel, self).__init__(parent)
self._editor = editor
def rowCount(self, mi):
""" Reimplemented to return the number of rows.
"""
cols = self._editor.factory.cols
result = (len(self._editor.names) + cols - 1) / cols
return result
def columnCount(self, mi):
""" Reimplemented to return the number of columns.
"""
return self._editor.factory.cols
def data(self, mi, role):
""" Reimplemented to return the data.
"""
if role == QtCore.Qt.DisplayRole:
index = mi.row() * self._editor.factory.cols + mi.column()
if index < len(self._editor.names):
return self._editor.names[index]
return None
| gpl-2.0 | -7,069,525,519,631,933,000 | 37.110769 | 80 | 0.516793 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/gtk/gdk/__init__/PixbufAnimation.py | 1 | 1026 | # encoding: utf-8
# module gtk.gdk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/pynotify/_pynotify.so
# by generator 1.135
# no doc
# imports
from exceptions import Warning
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
import pango as __pango
import pangocairo as __pangocairo
class PixbufAnimation(__gobject__gobject.GObject):
"""
Object GdkPixbufAnimation
Signals from GObject:
notify (GParam)
"""
def get_height(self, *args, **kwargs): # real signature unknown
pass
def get_iter(self, *args, **kwargs): # real signature unknown
pass
def get_static_image(self, *args, **kwargs): # real signature unknown
pass
def get_width(self, *args, **kwargs): # real signature unknown
pass
def is_static_image(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__gtype__ = None # (!) real value is ''
| gpl-2.0 | 4,487,719,490,974,702,000 | 22.318182 | 73 | 0.648148 | false |
ActiveState/code | recipes/Python/66426_ReadWriteLock/recipe-66426.py | 1 | 1131 | import threading
class ReadWriteLock:
"""A lock object that allows many simultaneous "read-locks", but
only one "write-lock"."""
def __init__(self):
self._read_ready = threading.Condition(threading.Lock())
self._readers = 0
def acquire_read(self):
"""Acquire a read-lock. Blocks only if some thread has
acquired write-lock."""
self._read_ready.acquire()
try:
self._readers += 1
finally:
self._read_ready.release()
def release_read(self):
"""Release a read-lock."""
self._read_ready.acquire()
try:
self._readers -= 1
if not self._readers:
self._read_ready.notifyAll()
finally:
self._read_ready.release()
def acquire_write(self):
"""Acquire a write lock. Blocks until there are no
acquired read- or write-locks."""
self._read_ready.acquire()
while self._readers > 0:
self._read_ready.wait()
def release_write(self):
"""Release a write-lock."""
self._read_ready.release()
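# A minimal usage sketch (not part of the original recipe): several reader
# threads may hold the lock at once, while a writer blocks until every
# reader has released it. acquire_write()/release_write() must be paired,
# just like acquire_read()/release_read().
if __name__ == "__main__":
    lock = ReadWriteLock()
    shared = {"value": 0}

    def reader(n):
        lock.acquire_read()
        try:
            print("reader %d sees %d" % (n, shared["value"]))
        finally:
            lock.release_read()

    def writer():
        lock.acquire_write()
        try:
            shared["value"] += 1
        finally:
            lock.release_write()

    threads = [threading.Thread(target=reader, args=(i,)) for i in range(3)]
    threads.append(threading.Thread(target=writer))
    for t in threads:
        t.start()
    for t in threads:
        t.join()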
| mit | 3,673,597,987,764,956,700 | 28 | 68 | 0.555261 | false |
realitix/vulkan | setup.py | 1 | 1143 | from setuptools import setup
with open("README.md") as file:
long_description = file.read()
setup(
name='vulkan',
version='1.1.99.1',
description='Ultimate Python binding for Vulkan API',
author='realitix',
author_email='[email protected]',
packages=['vulkan'],
    long_description=long_description,
long_description_content_type="text/markdown",
include_package_data=True,
install_requires=['cffi>=1.10'],
setup_requires=['cffi>=1.10'],
url='https://github.com/realitix/vulkan',
keywords='Graphics,3D,Vulkan,cffi',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Natural Language :: English",
"Topic :: Multimedia :: Graphics",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries :: Python Modules",
],
cffi_modules=["vulkan/vulkan_build.py:ffi"]
)
| apache-2.0 | -5,513,329,181,179,121,000 | 31.657143 | 71 | 0.634296 | false |
VerosK/django-dashing | dashing/widgets.py | 1 | 2802 | # -*- coding: utf-8 -*-
import json
from django.http import HttpResponse
from django.views.generic.detail import View
class JSONResponseMixin(object):
"""
A mixin that can be used to render a JSON response.
"""
def render_to_json_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
return HttpResponse(
self.convert_context_to_json(context),
content_type='application/json',
**response_kwargs
)
def convert_context_to_json(self, context):
"Convert the context dictionary into a JSON object"
return json.dumps(context)
class Widget(JSONResponseMixin, View):
def get(self, request, *args, **kwargs):
context = self.get_context()
return HttpResponse(json.dumps(context), content_type="application/json")
def render_to_response(self, context, **response_kwargs):
return self.render_to_json_response(context, **response_kwargs)
class NumberWidget(Widget):
title = ''
more_info = ''
updated_at = ''
change_rate = ''
value = ''
def get_title(self):
return self.title
def get_more_info(self):
return self.more_info
def get_updated_at(self):
return self.updated_at
def get_change_rate(self):
return self.change_rate
def get_value(self):
return self.value
def get_context(self):
return {
'title': self.get_title(),
'more_info': self.get_more_info(),
'updated_at': self.get_updated_at(),
'change_rate': self.get_change_rate(),
'value': self.get_value(),
}
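# A minimal usage sketch (the widget and URL names below are illustrative,
# not part of this module): subclass a widget, override its getters, and
# expose it in your URLconf so the dashboard can poll it as JSON.
#
#     from random import randint
#     from dashing.widgets import NumberWidget
#
#     class ActiveUsersWidget(NumberWidget):
#         title = 'Active users'
#
#         def get_value(self):
#             return str(randint(0, 100))  # replace with a real query
#
#     # urls.py: url(r'^widgets/active-users/$', ActiveUsersWidget.as_view()),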
class ListWidget(Widget):
title = ''
more_info = ''
updated_at = ''
data = []
def get_title(self):
return self.title
def get_more_info(self):
return self.more_info
def get_updated_at(self):
return self.updated_at
def get_data(self):
return self.data
def get_context(self):
return {
'title': self.get_title(),
'more_info': self.get_more_info(),
'updated_at': self.get_updated_at(),
'data': self.get_data(),
}
class GraphWidget(Widget):
title = ''
more_info = ''
value = ''
data = []
def get_title(self):
return self.title
def get_more_info(self):
return self.more_info
def get_value(self):
return self.value
def get_data(self):
return self.data
def get_context(self):
return {
'title': self.get_title(),
'more_info': self.get_more_info(),
'value': self.get_value(),
'data': self.get_data(),
}
| bsd-3-clause | 4,210,653,318,595,151,000 | 22.745763 | 81 | 0.568879 | false |
sunqm/pyscf | pyscf/pbc/gw/krgw_ac.py | 1 | 25949 | #!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tianyu Zhu <[email protected]>
#
'''
PBC spin-restricted G0W0-AC QP eigenvalues with k-point sampling
This implementation has N^4 scaling with a smaller prefactor than GW-CD
(also N^4), and is much faster than analytic GW (N^6).
GW-AC is recommended for valence states only, and is inaccurate for core states.
Method:
    See T. Zhu and G.K.-L. Chan, arXiv:2007.03148 (2020) for details
    Compute Sigma on the imaginary frequency axis with density fitting,
    then analytically continue it to the real frequency axis.
    Gaussian density fitting must be used (FFTDF and MDF are not supported).
'''
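# A minimal usage sketch (assumes the KRGWAC class defined later in this
# module, mirroring the molecular GW interface; the cell setup below is
# illustrative only):
#
#     import numpy
#     from pyscf.pbc import gto, scf
#     from pyscf.pbc.gw import krgw_ac
#
#     cell = gto.Cell()
#     cell.atom = 'He 0 0 0'
#     cell.a = numpy.eye(3) * 3.0
#     cell.basis = 'gth-dzv'
#     cell.pseudo = 'gth-pade'
#     cell.build()
#
#     kpts = cell.make_kpts([2, 1, 1])
#     kmf = scf.KRHF(cell, kpts).density_fit().run()  # GDF is required
#     mygw = krgw_ac.KRGWAC(kmf)
#     mygw.kernel()  # quasiparticle energies land in mygw.mo_energy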
from functools import reduce
import numpy
import numpy as np
import h5py
from scipy.optimize import newton, least_squares
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import _conc_mos
from pyscf.pbc import df, dft, scf
from pyscf.pbc.mp.kmp2 import get_nocc, get_nmo, get_frozen_mask
from pyscf import __config__
einsum = lib.einsum
def kernel(gw, mo_energy, mo_coeff, orbs=None,
kptlist=None, nw=None, verbose=logger.NOTE):
'''GW-corrected quasiparticle orbital energies
Returns:
A list : converged, mo_energy, mo_coeff
'''
mf = gw._scf
if gw.frozen is None:
frozen = 0
else:
frozen = gw.frozen
assert (frozen == 0)
if orbs is None:
orbs = range(gw.nmo)
if kptlist is None:
kptlist = range(gw.nkpts)
nkpts = gw.nkpts
nklist = len(kptlist)
# v_xc
dm = np.array(mf.make_rdm1())
v_mf = np.array(mf.get_veff()) - np.array(mf.get_j(dm_kpts=dm))
for k in range(nkpts):
v_mf[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), v_mf[k], mo_coeff[k]))
nocc = gw.nocc
nmo = gw.nmo
# v_hf from DFT/HF density
if gw.fc:
exxdiv = 'ewald'
else:
exxdiv = None
rhf = scf.KRHF(gw.mol, gw.kpts, exxdiv=exxdiv)
rhf.with_df = gw.with_df
if getattr(gw.with_df, '_cderi', None) is None:
raise RuntimeError('Found incompatible integral scheme %s.'
'KGWAC can be only used with GDF integrals' %
gw.with_df.__class__)
vk = rhf.get_veff(gw.mol,dm_kpts=dm) - rhf.get_j(gw.mol,dm_kpts=dm)
for k in range(nkpts):
vk[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), vk[k], mo_coeff[k]))
# Grids for integration on imaginary axis
freqs,wts = _get_scaled_legendre_roots(nw)
# Compute self-energy on imaginary axis i*[0,iw_cutoff]
sigmaI, omega = get_sigma_diag(gw, orbs, kptlist, freqs, wts, iw_cutoff=5.)
# Analytic continuation
coeff = []
if gw.ac == 'twopole':
for k in range(nklist):
coeff.append(AC_twopole_diag(sigmaI[k], omega, orbs, nocc))
elif gw.ac == 'pade':
for k in range(nklist):
coeff_tmp, omega_fit = AC_pade_thiele_diag(sigmaI[k], omega)
coeff.append(coeff_tmp)
coeff = np.array(coeff)
conv = True
# This code does not support metals
homo = -99.
lumo = 99.
for k in range(nkpts):
if homo < mf.mo_energy[k][nocc-1]:
homo = mf.mo_energy[k][nocc-1]
if lumo > mf.mo_energy[k][nocc]:
lumo = mf.mo_energy[k][nocc]
ef = (homo+lumo)/2.
mo_energy = np.zeros_like(np.array(mf.mo_energy))
for k in range(nklist):
kn = kptlist[k]
for p in orbs:
if gw.linearized:
# linearized G0W0
de = 1e-6
ep = mf.mo_energy[kn][p]
#TODO: analytic sigma derivative
if gw.ac == 'twopole':
sigmaR = two_pole(ep-ef, coeff[k,:,p-orbs[0]]).real
dsigma = two_pole(ep-ef+de, coeff[k,:,p-orbs[0]]).real - sigmaR.real
elif gw.ac == 'pade':
sigmaR = pade_thiele(ep-ef, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real
dsigma = pade_thiele(ep-ef+de, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real - sigmaR.real
zn = 1.0/(1.0-dsigma/de)
e = ep + zn*(sigmaR.real + vk[kn,p,p].real - v_mf[kn,p,p].real)
mo_energy[kn,p] = e
else:
# self-consistently solve QP equation
def quasiparticle(omega):
if gw.ac == 'twopole':
sigmaR = two_pole(omega-ef, coeff[k,:,p-orbs[0]]).real
elif gw.ac == 'pade':
sigmaR = pade_thiele(omega-ef, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real
return omega - mf.mo_energy[kn][p] - (sigmaR.real + vk[kn,p,p].real - v_mf[kn,p,p].real)
try:
e = newton(quasiparticle, mf.mo_energy[kn][p], tol=1e-6, maxiter=100)
mo_energy[kn,p] = e
except RuntimeError:
conv = False
mo_coeff = mf.mo_coeff
if gw.verbose >= logger.DEBUG:
numpy.set_printoptions(threshold=nmo)
for k in range(nkpts):
logger.debug(gw, ' GW mo_energy @ k%d =\n%s', k,mo_energy[k])
numpy.set_printoptions(threshold=1000)
return conv, mo_energy, mo_coeff
def get_rho_response(gw, omega, mo_energy, Lpq, kL, kidx):
'''
Compute density response function in auxiliary basis at freq iw
'''
nkpts, naux, nmo, nmo = Lpq.shape
nocc = gw.nocc
kpts = gw.kpts
kscaled = gw.mol.get_scaled_kpts(kpts)
kscaled -= kscaled[0]
# Compute Pi for kL
Pi = np.zeros((naux,naux),dtype=np.complex128)
for i, kpti in enumerate(kpts):
# Find ka that conserves with ki and kL (-ki+ka+kL=G)
a = kidx[i]
eia = mo_energy[i,:nocc,None] - mo_energy[a,None,nocc:]
eia = eia/(omega**2+eia*eia)
Pia = einsum('Pia,ia->Pia',Lpq[i][:,:nocc,nocc:],eia)
# Response from both spin-up and spin-down density
Pi += 4./nkpts * einsum('Pia,Qia->PQ',Pia,Lpq[i][:,:nocc,nocc:].conj())
return Pi
def get_sigma_diag(gw, orbs, kptlist, freqs, wts, iw_cutoff=None, max_memory=8000):
'''
Compute GW correlation self-energy (diagonal elements)
in MO basis on imaginary axis
'''
mo_energy = np.array(gw._scf.mo_energy)
mo_coeff = np.array(gw._scf.mo_coeff)
nocc = gw.nocc
nmo = gw.nmo
nkpts = gw.nkpts
kpts = gw.kpts
nklist = len(kptlist)
nw = len(freqs)
norbs = len(orbs)
mydf = gw.with_df
# possible kpts shift center
kscaled = gw.mol.get_scaled_kpts(kpts)
kscaled -= kscaled[0]
# This code does not support metals
homo = -99.
lumo = 99.
for k in range(nkpts):
if homo < mo_energy[k][nocc-1]:
homo = mo_energy[k][nocc-1]
if lumo > mo_energy[k][nocc]:
lumo = mo_energy[k][nocc]
if (lumo-homo)<1e-3:
logger.warn(gw, 'This GW-AC code is not supporting metals!')
ef = (homo+lumo)/2.
# Integration on numerical grids
if iw_cutoff is not None:
nw_sigma = sum(iw < iw_cutoff for iw in freqs) + 1
else:
nw_sigma = nw + 1
# Compute occ for -iw and vir for iw separately
# to avoid branch cuts in analytic continuation
omega_occ = np.zeros((nw_sigma), dtype=np.complex128)
omega_vir = np.zeros((nw_sigma), dtype=np.complex128)
omega_occ[1:] = -1j*freqs[:(nw_sigma-1)]
omega_vir[1:] = 1j*freqs[:(nw_sigma-1)]
orbs_occ = [i for i in orbs if i < nocc]
norbs_occ = len(orbs_occ)
emo_occ = np.zeros((nkpts,nmo,nw_sigma),dtype=np.complex128)
emo_vir = np.zeros((nkpts,nmo,nw_sigma),dtype=np.complex128)
for k in range(nkpts):
emo_occ[k] = omega_occ[None,:] + ef - mo_energy[k][:,None]
emo_vir[k] = omega_vir[None,:] + ef - mo_energy[k][:,None]
sigma = np.zeros((nklist,norbs,nw_sigma),dtype=np.complex128)
omega = np.zeros((norbs,nw_sigma),dtype=np.complex128)
for p in range(norbs):
orbp = orbs[p]
if orbp < nocc:
omega[p] = omega_occ.copy()
else:
omega[p] = omega_vir.copy()
if gw.fc:
# Set up q mesh for q->0 finite size correction
q_pts = np.array([1e-3,0,0]).reshape(1,3)
q_abs = gw.mol.get_abs_kpts(q_pts)
# Get qij = 1/sqrt(Omega) * < psi_{ik} | e^{iqr} | psi_{ak-q} > at q: (nkpts, nocc, nvir)
qij = get_qij(gw, q_abs[0], mo_coeff)
for kL in range(nkpts):
# Lij: (ki, L, i, j) for looping every kL
Lij = []
# kidx: save kj that conserves with kL and ki (-ki+kj+kL=G)
# kidx_r: save ki that conserves with kL and kj (-ki+kj+kL=G)
kidx = np.zeros((nkpts),dtype=np.int64)
kidx_r = np.zeros((nkpts),dtype=np.int64)
for i, kpti in enumerate(kpts):
for j, kptj in enumerate(kpts):
# Find (ki,kj) that satisfies momentum conservation with kL
kconserv = -kscaled[i] + kscaled[j] + kscaled[kL]
is_kconserv = np.linalg.norm(np.round(kconserv) - kconserv) < 1e-12
if is_kconserv:
kidx[i] = j
kidx_r[j] = i
logger.debug(gw, "Read Lpq (kL: %s / %s, ki: %s, kj: %s)"%(kL+1, nkpts, i, j))
Lij_out = None
# Read (L|pq) and ao2mo transform to (L|ij)
Lpq = []
for LpqR, LpqI, sign \
in mydf.sr_loop([kpti, kptj], max_memory=0.1*gw._scf.max_memory, compact=False):
Lpq.append(LpqR+LpqI*1.0j)
# support uneqaul naux on different k points
Lpq = np.vstack(Lpq).reshape(-1,nmo**2)
tao = []
ao_loc = None
moij, ijslice = _conc_mos(mo_coeff[i], mo_coeff[j])[2:]
Lij_out = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=Lij_out)
Lij.append(Lij_out.reshape(-1,nmo,nmo))
Lij = np.asarray(Lij)
naux = Lij.shape[1]
if kL == 0:
for w in range(nw):
# body dielectric matrix eps_body
Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
eps_body_inv = np.linalg.inv(np.eye(naux)-Pi)
if gw.fc:
# head dielectric matrix eps_00
Pi_00 = get_rho_response_head(gw, freqs[w], mo_energy, qij)
eps_00 = 1. - 4. * np.pi/np.linalg.norm(q_abs[0])**2 * Pi_00
# wings dielectric matrix eps_P0
Pi_P0 = get_rho_response_wing(gw, freqs[w], mo_energy, Lij, qij)
eps_P0 = -np.sqrt(4.*np.pi) / np.linalg.norm(q_abs[0]) * Pi_P0
# inverse dielectric matrix
eps_inv_00 = 1./(eps_00 - np.dot(np.dot(eps_P0.conj(),eps_body_inv),eps_P0))
eps_inv_P0 = -eps_inv_00 * np.dot(eps_body_inv, eps_P0)
# head correction
Del_00 = 2./np.pi * (6.*np.pi**2/gw.mol.vol/nkpts)**(1./3.) * (eps_inv_00 - 1.)
eps_inv_PQ = eps_body_inv
g0_occ = wts[w] * emo_occ / (emo_occ**2+freqs[w]**2)
g0_vir = wts[w] * emo_vir / (emo_vir**2+freqs[w]**2)
for k in range(nklist):
kn = kptlist[k]
# Find km that conserves with kn and kL (-km+kn+kL=G)
km = kidx_r[kn]
Qmn = einsum('Pmn,PQ->Qmn',Lij[km][:,:,orbs].conj(),eps_inv_PQ-np.eye(naux))
Wmn = 1./nkpts * einsum('Qmn,Qmn->mn',Qmn,Lij[km][:,:,orbs])
sigma[k][:norbs_occ] += -einsum('mn,mw->nw',Wmn[:,:norbs_occ],g0_occ[km])/np.pi
sigma[k][norbs_occ:] += -einsum('mn,mw->nw',Wmn[:,norbs_occ:],g0_vir[km])/np.pi
if gw.fc:
# apply head correction
assert(kn == km)
sigma[k][:norbs_occ] += -Del_00 * g0_occ[kn][orbs][:norbs_occ] /np.pi
sigma[k][norbs_occ:] += -Del_00 * g0_vir[kn][orbs][norbs_occ:] /np.pi
# apply wing correction
Wn_P0 = einsum('Pnm,P->nm',Lij[kn],eps_inv_P0).diagonal()
Wn_P0 = Wn_P0.real * 2.
Del_P0 = np.sqrt(gw.mol.vol/4./np.pi**3) * (6.*np.pi**2/gw.mol.vol/nkpts)**(2./3.) * Wn_P0[orbs]
sigma[k][:norbs_occ] += -einsum('n,nw->nw', Del_P0[:norbs_occ],
g0_occ[kn][orbs][:norbs_occ]) /np.pi
sigma[k][norbs_occ:] += -einsum('n,nw->nw', Del_P0[norbs_occ:],
g0_vir[kn][orbs][norbs_occ:]) /np.pi
else:
for w in range(nw):
Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
Pi_inv = np.linalg.inv(np.eye(naux)-Pi)-np.eye(naux)
g0_occ = wts[w] * emo_occ / (emo_occ**2+freqs[w]**2)
g0_vir = wts[w] * emo_vir / (emo_vir**2+freqs[w]**2)
for k in range(nklist):
kn = kptlist[k]
# Find km that conserves with kn and kL (-km+kn+kL=G)
km = kidx_r[kn]
Qmn = einsum('Pmn,PQ->Qmn',Lij[km][:,:,orbs].conj(),Pi_inv)
Wmn = 1./nkpts * einsum('Qmn,Qmn->mn',Qmn,Lij[km][:,:,orbs])
sigma[k][:norbs_occ] += -einsum('mn,mw->nw',Wmn[:,:norbs_occ],g0_occ[km])/np.pi
sigma[k][norbs_occ:] += -einsum('mn,mw->nw',Wmn[:,norbs_occ:],g0_vir[km])/np.pi
return sigma, omega
def get_rho_response_head(gw, omega, mo_energy, qij):
'''
Compute head (G=0, G'=0) density response function in auxiliary basis at freq iw
'''
nkpts, nocc, nvir = qij.shape
nocc = gw.nocc
kpts = gw.kpts
# Compute Pi head
Pi_00 = 0j
for i, kpti in enumerate(kpts):
eia = mo_energy[i,:nocc,None] - mo_energy[i,None,nocc:]
eia = eia/(omega**2+eia*eia)
Pi_00 += 4./nkpts * einsum('ia,ia->',eia,qij[i].conj()*qij[i])
return Pi_00
def get_rho_response_wing(gw, omega, mo_energy, Lpq, qij):
'''
Compute wing (G=P, G'=0) density response function in auxiliary basis at freq iw
'''
nkpts, naux, nmo, nmo = Lpq.shape
nocc = gw.nocc
kpts = gw.kpts
# Compute Pi wing
Pi = np.zeros(naux,dtype=np.complex128)
for i, kpti in enumerate(kpts):
eia = mo_energy[i,:nocc,None] - mo_energy[i,None,nocc:]
eia = eia/(omega**2+eia*eia)
eia_q = eia * qij[i].conj()
Pi += 4./nkpts * einsum('Pia,ia->P',Lpq[i][:,:nocc,nocc:],eia_q)
return Pi
def get_qij(gw, q, mo_coeff, uniform_grids=False):
'''
Compute qij = 1/Omega * |< psi_{ik} | e^{iqr} | psi_{ak-q} >|^2 at q: (nkpts, nocc, nvir)
through kp perturbtation theory
Ref: Phys. Rev. B 83, 245122 (2011)
'''
nocc = gw.nocc
nmo = gw.nmo
nvir = nmo - nocc
kpts = gw.kpts
nkpts = len(kpts)
cell = gw.mol
mo_energy = gw._scf.mo_energy
if uniform_grids:
mydf = df.FFTDF(cell, kpts=kpts)
coords = cell.gen_uniform_grids(mydf.mesh)
else:
coords, weights = dft.gen_grid.get_becke_grids(cell,level=5)
ngrid = len(coords)
qij = np.zeros((nkpts,nocc,nvir),dtype=np.complex128)
for i, kpti in enumerate(kpts):
ao_p = dft.numint.eval_ao(cell, coords, kpt=kpti, deriv=1)
ao = ao_p[0]
ao_grad = ao_p[1:4]
if uniform_grids:
ao_ao_grad = einsum('mg,xgn->xmn',ao.T.conj(),ao_grad) * cell.vol / ngrid
else:
ao_ao_grad = einsum('g,mg,xgn->xmn',weights,ao.T.conj(),ao_grad)
q_ao_ao_grad = -1j * einsum('x,xmn->mn',q,ao_ao_grad)
q_mo_mo_grad = np.dot(np.dot(mo_coeff[i][:,:nocc].T.conj(), q_ao_ao_grad), mo_coeff[i][:,nocc:])
enm = 1./(mo_energy[i][nocc:,None] - mo_energy[i][None,:nocc])
dens = enm.T * q_mo_mo_grad
qij[i] = dens / np.sqrt(cell.vol)
return qij
def _get_scaled_legendre_roots(nw):
"""
Scale nw Legendre roots, which lie in the
interval [-1, 1], so that they lie in [0, inf)
Ref: www.cond-mat.de/events/correl19/manuscripts/ren.pdf
Returns:
freqs : 1D ndarray
wts : 1D ndarray
"""
freqs, wts = np.polynomial.legendre.leggauss(nw)
x0 = 0.5
freqs_new = x0*(1.+freqs)/(1.-freqs)
wts = wts*2.*x0/(1.-freqs)**2
return freqs_new, wts
def _get_clenshaw_curtis_roots(nw):
"""
Clenshaw-Curtis qaudrature on [0,inf)
Ref: J. Chem. Phys. 132, 234114 (2010)
Returns:
freqs : 1D ndarray
wts : 1D ndarray
"""
freqs = np.zeros(nw)
wts = np.zeros(nw)
a = 0.2
for w in range(nw):
t = (w+1.0)/nw * np.pi/2.
freqs[w] = a / np.tan(t)
if w != nw-1:
wts[w] = a*np.pi/2./nw/(np.sin(t)**2)
else:
wts[w] = a*np.pi/4./nw/(np.sin(t)**2)
return freqs[::-1], wts[::-1]
def two_pole_fit(coeff, omega, sigma):
cf = coeff[:5] + 1j*coeff[5:]
f = cf[0] + cf[1]/(omega+cf[3]) + cf[2]/(omega+cf[4]) - sigma
f[0] = f[0]/0.01
return np.array([f.real,f.imag]).reshape(-1)
def two_pole(freqs, coeff):
cf = coeff[:5] + 1j*coeff[5:]
return cf[0] + cf[1]/(freqs+cf[3]) + cf[2]/(freqs+cf[4])
def AC_twopole_diag(sigma, omega, orbs, nocc):
"""
Analytic continuation to real axis using a two-pole model
Returns:
coeff: 2D array (ncoeff, norbs)
"""
norbs, nw = sigma.shape
coeff = np.zeros((10,norbs))
for p in range(norbs):
if orbs[p] < nocc:
x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, -1.0, -0.5])
else:
x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, 1.0, 0.5])
#TODO: analytic gradient
xopt = least_squares(two_pole_fit, x0, jac='3-point', method='trf', xtol=1e-10,
gtol = 1e-10, max_nfev=1000, verbose=0, args=(omega[p], sigma[p]))
if xopt.success is False:
print('WARN: 2P-Fit Orb %d not converged, cost function %e'%(p,xopt.cost))
coeff[:,p] = xopt.x.copy()
return coeff
def thiele(fn,zn):
nfit = len(zn)
g = np.zeros((nfit,nfit),dtype=np.complex128)
g[:,0] = fn.copy()
for i in range(1,nfit):
g[i:,i] = (g[i-1,i-1]-g[i:,i-1])/((zn[i:]-zn[i-1])*g[i:,i-1])
a = g.diagonal()
return a
def pade_thiele(freqs,zn,coeff):
nfit = len(coeff)
X = coeff[-1]*(freqs-zn[-2])
for i in range(nfit-1):
idx = nfit-i-1
X = coeff[idx]*(freqs-zn[idx-1])/(1.+X)
X = coeff[0]/(1.+X)
return X
def AC_pade_thiele_diag(sigma, omega):
"""
Analytic continuation to real axis using a Pade approximation
from Thiele's reciprocal difference method
Reference: J. Low Temp. Phys. 29, 179 (1977)
Returns:
coeff: 2D array (ncoeff, norbs)
omega: 2D array (norbs, npade)
"""
idx = range(1,40,6)
sigma1 = sigma[:,idx].copy()
sigma2 = sigma[:,(idx[-1]+4)::4].copy()
sigma = np.hstack((sigma1,sigma2))
omega1 = omega[:,idx].copy()
omega2 = omega[:,(idx[-1]+4)::4].copy()
omega = np.hstack((omega1,omega2))
norbs, nw = sigma.shape
npade = nw // 2
coeff = np.zeros((npade*2,norbs),dtype=np.complex128)
for p in range(norbs):
coeff[:,p] = thiele(sigma[p,:npade*2], omega[p,:npade*2])
return coeff, omega[:,:npade*2]
class KRGWAC(lib.StreamObject):
linearized = getattr(__config__, 'gw_gw_GW_linearized', False)
# Analytic continuation: pade or twopole
ac = getattr(__config__, 'gw_gw_GW_ac', 'pade')
# Whether applying finite size corrections
fc = getattr(__config__, 'gw_gw_GW_fc', True)
def __init__(self, mf, frozen=0):
self.mol = mf.mol
self._scf = mf
self.verbose = self.mol.verbose
self.stdout = self.mol.stdout
self.max_memory = mf.max_memory
#TODO: implement frozen orbs
if frozen > 0:
raise NotImplementedError
self.frozen = frozen
# DF-KGW must use GDF integrals
if getattr(mf, 'with_df', None):
self.with_df = mf.with_df
else:
raise NotImplementedError
self._keys.update(['with_df'])
##################################################
# don't modify the following attributes, they are not input options
self._nocc = None
self._nmo = None
self.kpts = mf.kpts
self.nkpts = len(self.kpts)
# self.mo_energy: GW quasiparticle energy, not scf mo_energy
self.mo_energy = None
self.mo_coeff = mf.mo_coeff
self.mo_occ = mf.mo_occ
self.sigma = None
keys = set(('linearized','ac','fc'))
self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('')
log.info('******** %s ********', self.__class__)
log.info('method = %s', self.__class__.__name__)
nocc = self.nocc
nvir = self.nmo - nocc
nkpts = self.nkpts
log.info('GW nocc = %d, nvir = %d, nkpts = %d', nocc, nvir, nkpts)
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
logger.info(self, 'use perturbative linearized QP eqn = %s', self.linearized)
logger.info(self, 'analytic continuation method = %s', self.ac)
logger.info(self, 'GW finite size corrections = %s', self.fc)
return self
@property
def nocc(self):
return self.get_nocc()
@nocc.setter
def nocc(self, n):
self._nocc = n
@property
def nmo(self):
return self.get_nmo()
@nmo.setter
def nmo(self, n):
self._nmo = n
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
def kernel(self, mo_energy=None, mo_coeff=None, orbs=None, kptlist=None, nw=100):
"""
Input:
kptlist: self-energy k-points
orbs: self-energy orbs
nw: grid number
Output:
mo_energy: GW quasiparticle energy
"""
if mo_coeff is None:
mo_coeff = np.array(self._scf.mo_coeff)
if mo_energy is None:
mo_energy = np.array(self._scf.mo_energy)
nmo = self.nmo
naux = self.with_df.get_naoaux()
nkpts = self.nkpts
mem_incore = (2*nkpts*nmo**2*naux) * 16/1e6
mem_now = lib.current_memory()[0]
if (mem_incore + mem_now > 0.99*self.max_memory):
logger.warn(self, 'Memory may not be enough!')
raise NotImplementedError
cput0 = (logger.process_clock(), logger.perf_counter())
self.dump_flags()
self.converged, self.mo_energy, self.mo_coeff = \
kernel(self, mo_energy, mo_coeff, orbs=orbs,
kptlist=kptlist, nw=nw, verbose=self.verbose)
logger.warn(self, 'GW QP energies may not be sorted from min to max')
logger.timer(self, 'GW', *cput0)
return self.mo_energy
if __name__ == '__main__':
from pyscf.pbc import gto
from pyscf.pbc.lib import chkfile
import os
# This test takes a few minutes
cell = gto.Cell()
cell.build(unit = 'angstrom',
a = '''
0.000000 1.783500 1.783500
1.783500 0.000000 1.783500
1.783500 1.783500 0.000000
''',
atom = 'C 1.337625 1.337625 1.337625; C 2.229375 2.229375 2.229375',
dimension = 3,
max_memory = 8000,
verbose = 4,
pseudo = 'gth-pade',
basis='gth-szv',
precision=1e-10)
kpts = cell.make_kpts([3,1,1],scaled_center=[0,0,0])
gdf = df.GDF(cell, kpts)
gdf_fname = 'gdf_ints_311.h5'
gdf._cderi_to_save = gdf_fname
if not os.path.isfile(gdf_fname):
gdf.build()
chkfname = 'diamond_311.chk'
if os.path.isfile(chkfname):
kmf = dft.KRKS(cell, kpts)
kmf.xc = 'pbe'
kmf.with_df = gdf
kmf.with_df._cderi = gdf_fname
data = chkfile.load(chkfname, 'scf')
kmf.__dict__.update(data)
else:
kmf = dft.KRKS(cell, kpts)
kmf.xc = 'pbe'
kmf.with_df = gdf
kmf.with_df._cderi = gdf_fname
kmf.conv_tol = 1e-12
kmf.chkfile = chkfname
kmf.kernel()
gw = KRGWAC(kmf)
gw.linearized = False
gw.ac = 'pade'
# without finite size corrections
gw.fc = False
nocc = gw.nocc
gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
print(gw.mo_energy)
assert((abs(gw.mo_energy[0][nocc-1]-0.62045797))<1e-5)
assert((abs(gw.mo_energy[0][nocc]-0.96574324))<1e-5)
assert((abs(gw.mo_energy[1][nocc-1]-0.52639137))<1e-5)
assert((abs(gw.mo_energy[1][nocc]-1.07513258))<1e-5)
# with finite size corrections
gw.fc = True
gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
print(gw.mo_energy)
assert((abs(gw.mo_energy[0][nocc-1]-0.54277092))<1e-5)
assert((abs(gw.mo_energy[0][nocc]-0.80148537))<1e-5)
assert((abs(gw.mo_energy[1][nocc-1]-0.45073793))<1e-5)
assert((abs(gw.mo_energy[1][nocc]-0.92910108))<1e-5)
| apache-2.0 | -9,168,461,443,576,111,000 | 35.754958 | 120 | 0.541524 | false |
chrislit/abydos | tests/distance/test_distance_baystat.py | 1 | 3929 | # Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_baystat.
This module contains unit tests for abydos.distance.Baystat
"""
import unittest
from abydos.distance import Baystat
class BaystatTestCases(unittest.TestCase):
"""Test Baystat functions.
abydos.distance.Baystat
"""
cmp = Baystat()
def test_baystat_sim(self):
"""Test abydos.distance.Baystat.sim."""
# Base cases
self.assertEqual(self.cmp.sim('', ''), 1)
self.assertEqual(self.cmp.sim('Colin', ''), 0)
self.assertEqual(self.cmp.sim('Colin', 'Colin'), 1)
# Examples given in the paper
# https://www.statistik.bayern.de/medien/statistik/zensus/zusammenf__hrung_von_datenbest__nden_ohne_numerische_identifikatoren.pdf
self.assertAlmostEqual(self.cmp.sim('DRAKOMENA', 'DRAOMINA'), 7 / 9)
self.assertAlmostEqual(self.cmp.sim('RIEKI', 'RILKI'), 4 / 5)
self.assertAlmostEqual(
self.cmp.sim('ATANASSIONI', 'ATANASIOU'), 8 / 11
)
self.assertAlmostEqual(
self.cmp.sim('LIESKOVSKY', 'LIESZKOVSZKY'), 10 / 12
)
self.assertAlmostEqual(self.cmp.sim('JEANETTE', 'JEANNETTE'), 8 / 9)
self.assertAlmostEqual(self.cmp.sim('JOHANNES', 'JOHAN'), 0.625)
self.assertAlmostEqual(self.cmp.sim('JOHANNES', 'HANS'), 0.375)
self.assertAlmostEqual(self.cmp.sim('JOHANNES', 'HANNES'), 0.75)
self.assertAlmostEqual(self.cmp.sim('ZIMMERMANN', 'SEMMERMANN'), 0.8)
self.assertAlmostEqual(self.cmp.sim('ZIMMERMANN', 'ZIMMERER'), 0.6)
self.assertAlmostEqual(self.cmp.sim('ZIMMERMANN', 'ZIMMER'), 0.6)
# Tests to maximize coverage
self.assertAlmostEqual(
Baystat(2, 2, 2).sim('ZIMMERMANN', 'SEMMERMANN'), 0.8
)
self.assertAlmostEqual(self.cmp.sim('ZIMMER', 'ZIMMERMANN'), 0.6)
def test_baystat_dist(self):
"""Test abydos.distance.Baystat.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 0)
self.assertEqual(self.cmp.dist('Colin', ''), 1)
self.assertEqual(self.cmp.dist('Colin', 'Colin'), 0)
# Examples given in the paper
# https://www.statistik.bayern.de/medien/statistik/zensus/zusammenf__hrung_von_datenbest__nden_ohne_numerische_identifikatoren.pdf
self.assertAlmostEqual(self.cmp.dist('DRAKOMENA', 'DRAOMINA'), 2 / 9)
self.assertAlmostEqual(self.cmp.dist('RIEKI', 'RILKI'), 1 / 5)
self.assertAlmostEqual(
self.cmp.dist('ATANASSIONI', 'ATANASIOU'), 3 / 11
)
self.assertAlmostEqual(
self.cmp.dist('LIESKOVSKY', 'LIESZKOVSZKY'), 2 / 12
)
self.assertAlmostEqual(self.cmp.dist('JEANETTE', 'JEANNETTE'), 1 / 9)
self.assertAlmostEqual(self.cmp.dist('JOHANNES', 'JOHAN'), 0.375)
self.assertAlmostEqual(self.cmp.dist('JOHANNES', 'HANS'), 0.625)
self.assertAlmostEqual(self.cmp.dist('JOHANNES', 'HANNES'), 0.25)
self.assertAlmostEqual(self.cmp.dist('ZIMMERMANN', 'SEMMERMANN'), 0.2)
self.assertAlmostEqual(self.cmp.dist('ZIMMERMANN', 'ZIMMERER'), 0.4)
self.assertAlmostEqual(self.cmp.dist('ZIMMERMANN', 'ZIMMER'), 0.4)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,592,109,680,175,767,000 | 41.247312 | 138 | 0.65691 | false |
shakamunyi/sahara | sahara/tests/unit/plugins/storm/test_config_helper.py | 1 | 2392 | # Copyright 2017 Massachusetts Open Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import testcase
from sahara.plugins.storm import config_helper as s_config
from sahara.plugins.storm import plugin as s_plugin
class TestStormConfigHelper(testcase.TestCase):
def test_generate_storm_config(self):
STORM_092 = '0.9.2'
STORM_101 = '1.0.1'
STORM_110 = '1.1.0'
tested_versions = []
master_hostname = "s-master"
zk_hostnames = ["s-zoo"]
configs_092 = s_config.generate_storm_config(
master_hostname, zk_hostnames, STORM_092)
self.assertTrue('nimbus.host' in configs_092.keys())
self.assertFalse('nimbus.seeds' in configs_092.keys())
tested_versions.append(STORM_092)
configs_101 = s_config.generate_storm_config(
master_hostname, zk_hostnames, STORM_101)
self.assertFalse('nimbus.host' in configs_101.keys())
self.assertTrue('nimbus.seeds' in configs_101.keys())
self.assertTrue('client.jartransformer.class' in configs_101.keys())
self.assertEqual(configs_101['client.jartransformer.class'],
'org.apache.storm.hack.StormShadeTransformer')
tested_versions.append(STORM_101)
configs_110 = s_config.generate_storm_config(
master_hostname, zk_hostnames, STORM_110)
self.assertFalse('nimbus.host' in configs_110.keys())
self.assertTrue('nimbus.seeds' in configs_110.keys())
self.assertTrue('client.jartransformer.class' in configs_110.keys())
self.assertEqual(configs_110['client.jartransformer.class'],
'org.apache.storm.hack.StormShadeTransformer')
tested_versions.append(STORM_110)
storm = s_plugin.StormProvider()
self.assertEqual(storm.get_versions(), tested_versions)
| apache-2.0 | -3,610,181,268,468,644,400 | 45 | 77 | 0.677258 | false |
USGM/suds | suds/sudsobject.py | 1 | 11056 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{sudsobject} module provides a collection of suds objects
that are primarily used for the highly dynamic interactions with
wsdl/xsd defined types.
"""
from logging import getLogger
from suds import *
log = getLogger(__name__)
def items(sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
for item in sobject:
yield item
def asdict(sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return dict(items(sobject))
def merge(a, b):
"""
Merge all attributes and metadata from I{a} to I{b}.
@param a: A I{source} object
@type a: L{Object}
@param b: A I{destination} object
@type b: L{Object}
"""
for item in a:
setattr(b, item[0], item[1])
b.__metadata__ = b.__metadata__
return b
def footprint(sobject):
"""
Get the I{virtual footprint} of the object.
This is really a count of the attributes in the branch with a significant value.
@param sobject: A suds object.
@type sobject: L{Object}
@return: The branch footprint.
@rtype: int
"""
n = 0
for a in sobject.__keylist__:
v = getattr(sobject, a)
if v is None: continue
if isinstance(v, Object):
n += footprint(v)
continue
if hasattr(v, '__len__'):
if len(v): n += 1
continue
n +=1
return n
class Factory:
cache = {}
@classmethod
def subclass(cls, name, bases, dict={}):
if not isinstance(bases, tuple):
bases = (bases,)
name = name
key = '.'.join((name, str(bases)))
subclass = cls.cache.get(key)
if subclass is None:
subclass = type(name, bases, dict)
cls.cache[key] = subclass
return subclass
@classmethod
def object(cls, classname=None, dict={}):
if classname is not None:
subclass = cls.subclass(classname, Object)
inst = subclass()
else:
inst = Object()
for a in list(dict.items()):
setattr(inst, a[0], a[1])
return inst
@classmethod
def metadata(cls):
return Metadata()
@classmethod
def property(cls, name, value=None):
subclass = cls.subclass(name, Property)
return subclass(value)
class Object:
def __init__(self):
self.__keylist__ = []
self.__printer__ = Printer()
self.__metadata__ = Metadata()
def __setattr__(self, name, value):
builtin = name.startswith('__') and name.endswith('__')
if not builtin and \
name not in self.__keylist__:
self.__keylist__.append(name)
self.__dict__[name] = value
def __delattr__(self, name):
try:
del self.__dict__[name]
builtin = name.startswith('__') and name.endswith('__')
if not builtin:
self.__keylist__.remove(name)
except:
cls = self.__class__.__name__
raise AttributeError("%s has no attribute '%s'" % (cls, name))
def __getitem__(self, name):
if isinstance(name, int):
name = self.__keylist__[int(name)]
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __iter__(self):
return Iter(self)
def __len__(self):
return len(self.__keylist__)
def __contains__(self, name):
return name in self.__keylist__
def __repr__(self):
return str(self)
def __str__(self):
return self.__printer__.tostr(self)
class Iter:
def __init__(self, sobject):
self.sobject = sobject
self.keylist = self.__keylist(sobject)
self.index = 0
def __next__(self):
keylist = self.keylist
nkeys = len(self.keylist)
while self.index < nkeys:
k = keylist[self.index]
self.index += 1
if hasattr(self.sobject, k):
v = getattr(self.sobject, k)
return (k, v)
raise StopIteration()
def __keylist(self, sobject):
keylist = sobject.__keylist__
try:
keyset = set(keylist)
ordering = sobject.__metadata__.ordering
ordered = set(ordering)
if not ordered.issuperset(keyset):
log.debug(
'%s must be superset of %s, ordering ignored',
keylist,
ordering)
raise KeyError()
return ordering
except:
return keylist
def __iter__(self):
return self
class Metadata(Object):
def __init__(self):
self.__keylist__ = []
self.__printer__ = Printer()
class Facade(Object):
def __init__(self, name):
Object.__init__(self)
md = self.__metadata__
md.facade = name
class Property(Object):
def __init__(self, value):
Object.__init__(self)
self.value = value
def items(self):
for item in self:
if item[0] != 'value':
yield item
def get(self):
return self.value
def set(self, value):
self.value = value
return self
class Printer:
"""
Pretty printing of a Object object.
"""
@classmethod
def indent(cls, n): return '%*s'%(n*3,' ')
def tostr(self, object, indent=-2):
""" get s string representation of object """
history = []
return self.process(object, history, indent)
def process(self, object, h, n=0, nl=False):
""" print object using the specified indent (n) and newline (nl). """
if object is None:
return 'None'
if isinstance(object, Object):
if len(object) == 0:
return '<empty>'
else:
return self.print_object(object, h, n+2, nl)
if isinstance(object, dict):
if len(object) == 0:
return '<empty>'
else:
return self.print_dictionary(object, h, n+2, nl)
if isinstance(object, (list,tuple)):
if len(object) == 0:
return '<empty>'
else:
return self.print_collection(object, h, n+2)
if isinstance(object, str):
return '"%s"' % tostr(object)
return '%s' % tostr(object)
def print_object(self, d, h, n, nl=False):
""" print complex using the specified indent (n) and newline (nl). """
s = []
cls = d.__class__
md = d.__metadata__
if d in h:
s.append('(')
s.append(cls.__name__)
s.append(')')
s.append('...')
return ''.join(s)
h.append(d)
if nl:
s.append('\n')
s.append(self.indent(n))
if cls != Object:
s.append('(')
if isinstance(d, Facade):
s.append(md.facade)
else:
s.append(cls.__name__)
s.append(')')
s.append('{')
for item in d:
if self.exclude(d, item):
continue
item = self.unwrap(d, item)
s.append('\n')
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(item[0])
s.append('[]')
else:
s.append(item[0])
s.append(' = ')
s.append(self.process(item[1], h, n, True))
s.append('\n')
s.append(self.indent(n))
s.append('}')
h.pop()
return ''.join(s)
def print_dictionary(self, d, h, n, nl=False):
""" print complex using the specified indent (n) and newline (nl). """
if d in h: return '{}...'
h.append(d)
s = []
if nl:
s.append('\n')
s.append(self.indent(n))
s.append('{')
for item in list(d.items()):
s.append('\n')
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(tostr(item[0]))
s.append('[]')
else:
s.append(tostr(item[0]))
s.append(' = ')
s.append(self.process(item[1], h, n, True))
s.append('\n')
s.append(self.indent(n))
s.append('}')
h.pop()
return ''.join(s)
def print_collection(self, c, h, n):
""" print collection using the specified indent (n) and newline (nl). """
if c in h: return '[]...'
h.append(c)
s = []
for item in c:
s.append('\n')
s.append(self.indent(n))
s.append(self.process(item, h, n-2))
s.append(',')
h.pop()
return ''.join(s)
def unwrap(self, d, item):
""" translate (unwrap) using an optional wrapper function """
nopt = ( lambda x: x )
try:
md = d.__metadata__
pmd = getattr(md, '__print__', None)
if pmd is None:
return item
wrappers = getattr(pmd, 'wrappers', {})
fn = wrappers.get(item[0], nopt)
return (item[0], fn(item[1]))
except:
pass
return item
def exclude(self, d, item):
""" check metadata for excluded items """
try:
md = d.__metadata__
pmd = getattr(md, '__print__', None)
if pmd is None:
return False
excludes = getattr(pmd, 'excludes', [])
return ( item[0] in excludes )
except:
pass
return False | lgpl-3.0 | 1,173,960,176,114,197,800 | 27.645078 | 84 | 0.510492 | false |
lmazuel/azure-sdk-for-python | azure-batch/azure/batch/operations/account_operations.py | 1 | 10813 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class AccountOperations(object):
"""AccountOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API Version. Constant value: "2018-03-01.6.1".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-03-01.6.1"
self.config = config
def list_node_agent_skus(
self, account_list_node_agent_skus_options=None, custom_headers=None, raw=False, **operation_config):
"""Lists all node agent SKUs supported by the Azure Batch service.
:param account_list_node_agent_skus_options: Additional parameters for
the operation
:type account_list_node_agent_skus_options:
~azure.batch.models.AccountListNodeAgentSkusOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NodeAgentSku
:rtype:
~azure.batch.models.NodeAgentSkuPaged[~azure.batch.models.NodeAgentSku]
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
filter = None
if account_list_node_agent_skus_options is not None:
filter = account_list_node_agent_skus_options.filter
max_results = None
if account_list_node_agent_skus_options is not None:
max_results = account_list_node_agent_skus_options.max_results
timeout = None
if account_list_node_agent_skus_options is not None:
timeout = account_list_node_agent_skus_options.timeout
client_request_id = None
if account_list_node_agent_skus_options is not None:
client_request_id = account_list_node_agent_skus_options.client_request_id
return_client_request_id = None
if account_list_node_agent_skus_options is not None:
return_client_request_id = account_list_node_agent_skus_options.return_client_request_id
ocp_date = None
if account_list_node_agent_skus_options is not None:
ocp_date = account_list_node_agent_skus_options.ocp_date
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_node_agent_skus.metadata['url']
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.NodeAgentSkuPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NodeAgentSkuPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_node_agent_skus.metadata = {'url': '/nodeagentskus'}
def list_pool_node_counts(
self, account_list_pool_node_counts_options=None, custom_headers=None, raw=False, **operation_config):
"""Gets the number of nodes in each state, grouped by pool.
:param account_list_pool_node_counts_options: Additional parameters
for the operation
:type account_list_pool_node_counts_options:
~azure.batch.models.AccountListPoolNodeCountsOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PoolNodeCounts
:rtype:
~azure.batch.models.PoolNodeCountsPaged[~azure.batch.models.PoolNodeCounts]
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
filter = None
if account_list_pool_node_counts_options is not None:
filter = account_list_pool_node_counts_options.filter
max_results = None
if account_list_pool_node_counts_options is not None:
max_results = account_list_pool_node_counts_options.max_results
timeout = None
if account_list_pool_node_counts_options is not None:
timeout = account_list_pool_node_counts_options.timeout
client_request_id = None
if account_list_pool_node_counts_options is not None:
client_request_id = account_list_pool_node_counts_options.client_request_id
return_client_request_id = None
if account_list_pool_node_counts_options is not None:
return_client_request_id = account_list_pool_node_counts_options.return_client_request_id
ocp_date = None
if account_list_pool_node_counts_options is not None:
ocp_date = account_list_pool_node_counts_options.ocp_date
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_pool_node_counts.metadata['url']
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=10, minimum=1)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_pool_node_counts.metadata = {'url': '/nodecounts'}
| mit | -5,444,836,896,498,578,000 | 46.634361 | 148 | 0.626468 | false |
tombstone/models | official/nlp/nhnet/optimizer.py | 1 | 3030 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer and learning rate scheduler."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.modeling.hyperparams import params_dict
class LearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Learning rate schedule."""
def __init__(self, initial_learning_rate, hidden_size, warmup_steps):
"""Initialize configuration of the learning rate schedule.
Args:
initial_learning_rate: A float, the initial learning rate.
hidden_size: An integer, the model dimension in the hidden layers.
warmup_steps: An integer, the number of steps required for linear warmup.
"""
super(LearningRateSchedule, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.hidden_size = hidden_size
self.warmup_steps = tf.cast(warmup_steps, tf.float32)
def __call__(self, global_step):
"""Calculate learning rate with linear warmup and rsqrt decay.
Args:
global_step: An integer, the current global step used for learning rate
calculation.
Returns:
A float, the learning rate needs to be used for current global step.
"""
with tf.name_scope('learning_rate_schedule'):
global_step = tf.cast(global_step, tf.float32)
learning_rate = self.initial_learning_rate
learning_rate *= (self.hidden_size**-0.5)
# Apply linear warmup
learning_rate *= tf.minimum(1.0, global_step / self.warmup_steps)
# Apply rsqrt decay
learning_rate /= tf.sqrt(tf.maximum(global_step, self.warmup_steps))
return learning_rate
def get_config(self):
"""Get the configuration of the learning rate schedule."""
return {
'initial_learning_rate': self.initial_learning_rate,
'hidden_size': self.hidden_size,
'warmup_steps': self.warmup_steps,
}
def create_optimizer(params: params_dict.ParamsDict):
"""Creates optimizer."""
lr_schedule = LearningRateSchedule(
params.learning_rate,
params.hidden_size,
params.learning_rate_warmup_steps)
return tf.keras.optimizers.Adam(
learning_rate=lr_schedule,
beta_1=params.adam_beta1,
beta_2=params.adam_beta2,
epsilon=params.adam_epsilon)
| apache-2.0 | 7,990,494,737,155,448,000 | 35.95122 | 80 | 0.690099 | false |
gem/oq-hazardlib | openquake/hmtk/sources/complex_fault_source.py | 1 | 9353 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2010-2017, GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# ([email protected]).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
# -*- coding: utf-8 -*-
'''
Defines the :class:
`openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource`, which
represents the openquake.hmtk defition of a complex fault source. This extends
the :class:`nrml.models.ComplexFaultSource`
'''
import warnings
import numpy as np
from math import fabs
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.line import Line
from openquake.hazardlib.geo.surface.complex_fault import ComplexFaultSurface
from openquake.hazardlib.source.complex_fault import ComplexFaultSource
import openquake.hmtk.sources.source_conversion_utils as conv
class mtkComplexFaultSource(object):
'''
New class to describe the mtk complex fault source object
:param str identifier:
ID code for the source
:param str name:
Source name
:param str trt:
Tectonic region type
:param geometry:
Instance of :class: nhlib.geo.surface.complex_fault.ComplexFaultSource
:param str mag_scale_rel:
Magnitude scaling relationsip
:param float rupt_aspect_ratio:
Rupture aspect ratio
:param mfd:
Magnitude frequency distribution as instance of
:class:`nrml.models.IncrementalMFD` or
:class:`nrml.models.TGRMFD`
:param float rake:
Rake of fault
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param catalogue:
Earthquake catalogue associated to source as instance of
:class:`openquake.hmtk.seismicity.catalogue.Catalogue` object
'''
def __init__(self, identifier, name, trt=None, geometry=None,
mag_scale_rel=None, rupt_aspect_ratio=None, mfd=None,
rake=None):
'''
Instantiate class with just the basic attributes: identifier and name
'''
self.typology = 'ComplexFault'
self.id = identifier
self.name = name
self.trt = trt
self.geometry = geometry
self.fault_edges = None
self.mag_scale_rel = mag_scale_rel
self.rupt_aspect_ratio = rupt_aspect_ratio
self.mfd = mfd
self.rake = rake
self.upper_depth = None
self.lower_depth = None
self.catalogue = None
self.dip = None
def create_geometry(self, input_geometry, mesh_spacing=1.0):
'''
If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
List of at least two fault edges of the fault source from
shallowest to deepest. Each edge can be represented as as either
i) instance of nhlib.geo.polygon.Polygon class
ii) numpy.ndarray [Longitude, Latitude, Depth]
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0}
'''
if not isinstance(input_geometry, list) or len(input_geometry) < 2:
raise ValueError('Complex fault geometry incorrectly defined')
self.fault_edges = []
for edge in input_geometry:
if not isinstance(edge, Line):
if not isinstance(edge, np.ndarray):
raise ValueError('Unrecognised or unsupported geometry '
'definition')
else:
self.fault_edges.append(Line([Point(row[0], row[1], row[2])
for row in edge]))
else:
self.fault_edges.append(edge)
# Updates the upper and lower sesmogenic depths to reflect geometry
self._get_minmax_edges(edge)
# Build fault surface
self.geometry = ComplexFaultSurface.from_fault_data(self.fault_edges,
mesh_spacing)
# Get a mean dip
self.dip = self.geometry.get_dip()
def _get_minmax_edges(self, edge):
'''
Updates the upper and lower depths based on the input edges
'''
if isinstance(edge, Line):
# For instance of line class need to loop over values
depth_vals = np.array([node.depth for node in edge.points])
else:
depth_vals = edge[:, 2]
temp_upper_depth = np.min(depth_vals)
if not self.upper_depth:
self.upper_depth = temp_upper_depth
else:
if temp_upper_depth < self.upper_depth:
self.upper_depth = temp_upper_depth
temp_lower_depth = np.max(depth_vals)
if not self.lower_depth:
self.lower_depth = temp_lower_depth
else:
if temp_lower_depth > self.lower_depth:
self.lower_depth = temp_lower_depth
def select_catalogue(self, selector, distance,
distance_metric='joyner-boore', upper_eq_depth=None,
lower_eq_depth=None):
'''
Selects earthquakes within a distance of the fault
:param selector:
Populated instance of :class:
`openquake.hmtk.seismicity.selector.CatalogueSelector`
:param distance:
Distance from point (km) for selection
:param str distance_metric
Choice of fault source distance metric 'joyner-boore' or 'rupture'
:param float upper_eq_depth:
Upper hypocentral depth of hypocentres to be selected
:param float lower_eq_depth:
Lower hypocentral depth of hypocentres to be selected
'''
if selector.catalogue.get_number_events() < 1:
raise ValueError('No events found in catalogue!')
# If dip is != 90 and 'rupture' distance metric is selected
if ('rupture' in distance_metric) and (fabs(self.dip - 90) > 1E-5):
# Use rupture distance
self.catalogue = selector.within_rupture_distance(
self.geometry,
distance,
upper_depth=upper_eq_depth,
lower_depth=lower_eq_depth)
else:
# Use Joyner-Boore distance
self.catalogue = selector.within_joyner_boore_distance(
self.geometry,
distance,
upper_depth=upper_eq_depth,
lower_depth=lower_eq_depth)
if self.catalogue.get_number_events() < 5:
# Throw a warning regarding the small number of earthquakes in
# the source!
warnings.warn('Source %s (%s) has fewer than 5 events'
% (self.id, self.name))
def create_oqhazardlib_source(self, tom, mesh_spacing, use_defaults=False):
"""
Creates an instance of the source model as :class:
openquake.hazardlib.source.complex_fault.ComplexFaultSource
"""
if not self.mfd:
raise ValueError("Cannot write to hazardlib without MFD")
return ComplexFaultSource(
self.id,
self.name,
self.trt,
self.mfd,
mesh_spacing,
conv.mag_scale_rel_to_hazardlib(self.mag_scale_rel, use_defaults),
conv.render_aspect_ratio(self.rupt_aspect_ratio, use_defaults),
tom,
self.fault_edges,
self.rake)
| agpl-3.0 | -315,525,468,201,625,000 | 37.331967 | 79 | 0.63338 | false |
AutoGentoo/AutoGentoo | autogentoo/portage/resolve.py | 1 | 22795 | import warnings
from abc import ABC, abstractmethod
from queue import Queue
from typing import List, Optional, Dict, Generator, Tuple, Set
from autogentoo.cportage import (
get_portage,
Dependency,
Atom,
Ebuild,
UseFlag,
UseOperatorT,
Portage,
AtomUseT,
AtomUseDefaultT,
RequiredUse,
AtomBlockT,
)
from autogentoo.portage import (
RequiredUseException,
DependencyContainer,
ResolutionException,
UseSuggestion,
SuggestionExpression,
InvalidExpressionException,
)
__emerge_session__: Optional["Emerge"] = None
def emerge_init(emerge: "Emerge"):
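    """
    Register the global Emerge session used by the resolver.

    :param emerge: the Emerge instance to install as the session
    """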
global __emerge_session__
__emerge_session__ = emerge
def emerge_session() -> "Emerge":
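    """
    Fetch the global Emerge session registered with emerge_init().

    :return: the active Emerge instance
    :raises RuntimeError: if emerge_init() has not been called yet
    """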
global __emerge_session__
if __emerge_session__ is None:
raise RuntimeError(
"emerge_init() must be called before emerge_session() is called"
)
return __emerge_session__
def resolve_single(
parent: Optional["SelectedEbuild"], depend_expr: Dependency
) -> Optional["ResolveDependency"]:
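    """
    Resolve a single dependency expression against a parent ebuild.

    Plain atoms are selected through the emerge session (blockers are
    recorded instead of selected). Use-conditional expressions are
    attached to the parent as hooks and re-evaluated when the guarding
    flag changes.

    :param parent: the ebuild whose dependency is being resolved,
        or None at global scope
    :param depend_expr: the dependency expression to resolve
    :return: the selected ebuild for plain atoms, otherwise None
    """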
if parent is None and depend_expr.atom is None:
raise ResolutionException(
"Use condition expressions are not valid at global scope"
)
if depend_expr.atom is not None: # Simple atom selection
# Check if this is a blocker
if depend_expr.atom.blocks != AtomBlockT.NONE:
emerge_session().add_block(parent, depend_expr.atom)
return None
sel_ebuild = emerge_session().select_atom(parent, depend_expr.atom)
if depend_expr.atom.useflags is None:
return sel_ebuild
for use in depend_expr.atom.useflags:
if not sel_ebuild.has_use(use.name):
                # Fall back to the atom's use default because the
                # flag does not exist in the ebuild's IUSE
default = AtomUseDefaultT(use.default)
if default == AtomUseDefaultT.NONE:
# We have no fallback when this useflag doesn't exist
# This is an error
raise KeyError(
"Invalid use flag '%s' for atom '%s'"
% (use.name, depend_expr.atom)
)
atom_flag = (
AtomUseT.ENABLE
if default == AtomUseDefaultT.ON
else AtomUseT.DISABLE
)
sel_ebuild.add_use(use.name, atom_flag == AtomUseT.ENABLE)
sel_ebuild.add_use_requirement(use.name, atom_flag)
else:
sel_ebuild.add_use_requirement(use.name, AtomUseT(use.option))
return sel_ebuild
else:
if depend_expr.use_condition != 0:
assert depend_expr.use_operator in (
UseOperatorT.ENABLE,
UseOperatorT.DISABLE,
)
# Simple use condition
use_flag = get_portage().get_use_flag(depend_expr.use_condition)
use_flag = UseFlag(
use_flag.name,
                # use_operator (not use_condition, which is the flag id)
                # carries the ENABLE/DISABLE state
                True if depend_expr.use_operator == UseOperatorT.ENABLE else False,
)
assert depend_expr.children is not None, "Invalid dependency expression"
conditional = UseConditional(parent, use_flag, depend_expr.children)
parent.add_use_hook(use_flag, conditional)
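            # The conditional lives on as a use hook of the parent;
            # resolve_single() deliberately returns None here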
else:
warnings.warn(
"Complex use selection is not implemented yet (%s)" % parent.ebuild.key
)
def resolve_all(
parent: Optional["SelectedEbuild"], depend: Dependency
) -> Generator["ResolveDependency", None, None]:
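    """
    Resolve every expression in a dependency list, yielding each
    non-None result from resolve_single().
    """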
for dep in depend:
resolved = resolve_single(parent, dep)
if resolved is not None:
yield resolved
class ResolveDependency(ABC):
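    """
    Base class for anything that can take part in dependency
    resolution. Implementations report whether they need to be
    re-evaluated through is_dirty().
    """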
_is_dirty: bool
def is_dirty(self) -> bool:
return self._is_dirty
@abstractmethod
def get_resolved(self) -> Optional["ResolveDependency"]:
...
class Hookable(ResolveDependency, ABC):
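    """
    A ResolveDependency that can react to use-flag changes
    through run_hook().
    """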
@abstractmethod
def run_hook(self, arg):
...
class UseSelection(Hookable):
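    """
    Enforces a use-flag requirement placed on an ebuild by an atom
    (e.g. foo[bar], foo[bar?], foo[bar=]). Depending on the atom's
    use operator the flag may be forced on or off, and run_hook()
    rejects any later change that violates the requirement.
    """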
enforcing: bool
target_value: bool
parent: "SelectedEbuild"
use_flag: UseFlag
flag: AtomUseT
def __init__(self, parent: "SelectedEbuild", use_flag: UseFlag, flag: AtomUseT):
self.parent = parent
self.use_flag = use_flag
self.flag = flag
self.target_value = use_flag.state
        self.enforcing = False
        self._is_dirty = False
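        # Map the atom's use operator onto a target value:
        #   [flag] / [-flag]    -> force the flag on / off
        #   [flag?] variants    -> only enforce when the current state matches
        #   [flag=] / [flag!=]  -> bind the flag to (the opposite of) its state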
if self.flag == AtomUseT.ENABLE:
self.target_value = True
self.enforcing = True
parent.schedule_use(UseFlag(use_flag.name, True))
elif self.flag == AtomUseT.DISABLE:
self.target_value = False
self.enforcing = True
parent.schedule_use(UseFlag(use_flag.name, False))
elif self.flag == AtomUseT.DISABLE_IF_OFF:
if not self.target_value:
self.enforcing = True
elif self.flag == AtomUseT.ENABLE_IF_ON:
if self.target_value:
self.enforcing = True
elif self.flag == AtomUseT.EQUAL:
self.enforcing = True
elif self.flag == AtomUseT.OPPOSITE:
self.enforcing = True
self.target_value = not self.target_value
parent.schedule_use(UseFlag(use_flag.name, self.target_value))
def run_hook(self, arg: bool):
"""
Make sure that this flag will
not get an invalid value
:param arg:
:return:
"""
if self.enforcing:
if arg != self.target_value:
raise RequiredUseException(
UseSuggestion(self.use_flag.name, self.target_value)
)
def get_resolved(self) -> Optional["ResolveDependency"]:
# This hook does not run any
# dependency resolution
return None
class RequiredUseHook(Hookable):
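    """
    Validates an ebuild's REQUIRED_USE expression whenever one of its
    use flags changes, raising a RequiredUseException with suggested
    flag changes when the expression cannot be satisfied.
    """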
expression: RequiredUse
ebuild: "SelectedEbuild"
def __init__(self, selected_ebuild: "SelectedEbuild", required_use: RequiredUse):
self.expression = required_use
        self.ebuild = selected_ebuild
        self._is_dirty = False
def run_hook(self, arg: bool):
"""
Evaluate the flags in the required use
expression to make sure we have a match
:param arg: flag state that changed (unused)
:return: None
"""
def evaluate_required_use(
operator: SuggestionExpression.Operator, expr: RequiredUse
) -> Tuple[SuggestionExpression, int, int]:
"""
Count the number of expressions
that evaluate to True.
:param operator: operator for suggestion
:param expr: expression to verify
:return: (suggestions, num_true, total)
"""
n = 0
k = 0
suggestion = SuggestionExpression(operator)
for req_use in expr:
n += 1
op = UseOperatorT(req_use.operator)
if op == UseOperatorT.ENABLE or op == UseOperatorT.DISABLE:
target = op == UseOperatorT.ENABLE
state = self.ebuild.get_use(req_use.name).state
if req_use.depend is None and state == target:
k += 1
elif state == target:
# This is a conditional expression
child_suggestion, k_c, n_c = evaluate_required_use(
SuggestionExpression.Operator.AND, req_use.depend
)
if k_c == n_c:
k += 1
else:
# There are two different options here
# Either disable this useflag
# or try to meet its requirements
s = SuggestionExpression(
SuggestionExpression.Operator.LEAST_ONE
)
s.append(UseSuggestion(req_use.name, not state))
s.append(child_suggestion)
suggestion.append(s)
                    elif req_use.depend is not None and state != target:
                        # Condition not triggered, so the conditional
                        # expression is vacuously satisfied
                        k += 1
else:
suggestion.append(UseSuggestion(req_use.name, not state))
elif op == UseOperatorT.LEAST_ONE:
child_suggestion, k_c, n_c = evaluate_required_use(
SuggestionExpression.Operator.LEAST_ONE, req_use.depend
)
if k_c >= 1:
k += 1
else:
suggestion.append(child_suggestion)
elif op == UseOperatorT.EXACT_ONE:
child_suggestion, k_c, n_c = evaluate_required_use(
SuggestionExpression.Operator.EXACT_ONE, req_use.depend
)
if k_c == 1:
k += 1
else:
suggestion.append(child_suggestion)
elif op == UseOperatorT.MOST_ONE:
child_suggestion, k_c, n_c = evaluate_required_use(
SuggestionExpression.Operator.MOST_ONE, req_use.depend
)
if k_c <= 1:
k += 1
else:
suggestion.append(child_suggestion)
else:
raise InvalidExpressionException(
"Required use operator '%s' is not valid" % op
)
return suggestion, k, n
suggestions, g_k, g_n = evaluate_required_use(
SuggestionExpression.Operator.AND, self.expression
)
if g_k != g_n:
print(
UseOperatorT(self.expression.operator),
self.expression.name,
self.ebuild,
)
print("%d %d" % (g_k, g_n), flush=True)
raise RequiredUseException(suggestions)
def get_resolved(self) -> Optional["ResolveDependency"]:
return None
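# Worked example of the counting scheme above (comments; illustrative):
# for REQUIRED_USE="^^ ( qt gtk )" with both flags disabled, the EXACT_ONE
# branch gets (k_c, n_c) = (0, 2) and collects UseSuggestions to enable qt
# or gtk; since k_c != 1 the node does not count as satisfied, the
# enclosing AND evaluation ends with g_k != g_n, and RequiredUseException
# is raised carrying those suggestions.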
class UseConditional(Hookable):
parent: "SelectedEbuild"
# Should we raise an error if this condition is ever false?
required: bool
# The target state required for this expression to be analyzed
useflag: UseFlag
    # The guarded dependency, used to re-calculate the resolution
dependency: Optional[Dependency]
current_evaluation: Optional["ResolveDependency"]
def __init__(
self,
parent: "SelectedEbuild",
useflag: UseFlag,
expression: Optional[Dependency],
required=False,
):
self.parent = parent
self.useflag = useflag
self.required = required
self.dependency = expression
self.current_evaluation = None
self._is_dirty = False
def get_resolved(self) -> Optional["ResolveDependency"]:
if self.dependency is None:
return None
if self._is_dirty or self.current_evaluation is None:
self.current_evaluation = resolve_single(self.parent, self.dependency)
return self.current_evaluation
def run_hook(self, flag_state: bool):
# Only evaluate the expression if our condition is met
if self.useflag.state != flag_state:
if self.required:
raise RequiredUseException(
UseSuggestion(self.useflag.name, self.useflag.state)
)
# Mark this expression to re-evaluate the dependencies
self._is_dirty = True
self.current_evaluation = None
return
self._is_dirty = False
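# Illustrative case (comments): for RDEPEND="qt? ( dev-libs/bar )" the
# resolver builds a UseConditional guarded on the "qt" flag; run_hook()
# marks the node dirty and drops the cached resolution whenever the flag's
# state stops matching the guard, and get_resolved() re-resolves the
# guarded dev-libs/bar dependency on the next regeneration pass.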
class SelectedEbuild(ResolveDependency):
selected_by: Dict[Atom, Optional["SelectedEbuild"]]
# The original ebuild
ebuild: Ebuild
# The useflag delta from IUSE of the original ebuild
useflags: Dict[str, UseFlag]
# Use requirements
use_requirements: Dict[str, AtomUseT]
    # Triggers that run when a use flag changes
use_flag_hooks: Dict[str, List[Hookable]]
global_flag_hooks: List[Hookable] # Trigger when any use flag is changed
# Flags to set next time we regenerate
flags: Queue[UseFlag]
generators: DependencyContainer[ResolveDependency]
resolved_deps: DependencyContainer[ResolveDependency]
resolve_session: "PackageResolutionSession"
def __init__(
self,
parent: Optional["SelectedEbuild"],
atom: Atom,
ebuild: Ebuild,
resolve_session: "PackageResolutionSession",
):
self.selected_by = {}
self.ebuild = ebuild
self.useflags = {}
self.use_flag_hooks = {}
self.global_flag_hooks = []
self.flags = Queue()
self.resolve_session = resolve_session
self.generators = DependencyContainer[ResolveDependency]()
self.resolved_deps = DependencyContainer[ResolveDependency]()
self._is_dirty = True
self.add_selected_by(parent, atom)
def get_resolved(self) -> Optional[ResolveDependency]:
if self.resolve_session.check_resolved(self):
return self
self.resolve_session.add_to_session(self)
self.regenerate()
return self
def add_selected_by(self, parent: Optional["SelectedEbuild"], atom: Atom):
self.selected_by[atom] = parent
def change_within_slot(self, atom: Atom) -> bool:
"""
Change the currently selected ebuild
to another ebuild within the same slot
        :param atom: atom that must match an ebuild, together with every
            atom that previously selected this ebuild
        :return: True if it can be done
"""
ebuild_match: Optional[Ebuild] = None
for ebuild in self.ebuild.package:
all_match = atom.matches(ebuild)
            for prev_atom in self.selected_by:
                # Every atom that previously selected this ebuild must also
                # match; do not overwrite the result accumulated so far.
                if not prev_atom.matches(ebuild):
                    all_match = False
                    break
if all_match:
ebuild_match = ebuild
break
if ebuild_match is None:
return False
self._is_dirty = True
self.ebuild = ebuild_match
return True
def regenerate(self):
"""
Refresh all children
:return:
"""
if self._is_dirty:
# A new ebuild was selected
# We need to regenerate the generators
self.generators.clear()
if not self.ebuild.metadata_init:
self.ebuild.initialize_metadata()
# Update use flags by flushing the flag buffer
self.flush_use()
# Make sure all use-flags conform to requirements
if self.ebuild.required_use is not None:
self.add_use_hook(None, RequiredUseHook(self, self.ebuild.required_use))
for i, dep_type in enumerate(
(
self.ebuild.bdepend,
self.ebuild.depend,
self.ebuild.rdepend,
self.ebuild.pdepend,
)
):
if dep_type is None:
continue
for dep in resolve_all(self, dep_type):
self.generators[i].append(dep)
self._is_dirty = False
# Regenerate children (recursively)
        # All non-dirty expressions are already cached,
        # so we can simply clear the container and re-append them
self.resolved_deps.clear()
# Regenerate the dependency with the dirty flag enabled
i = 0
for dep_type in self.generators:
for generator in dep_type:
if generator is None:
continue
resolved = generator.get_resolved()
if resolved is None:
continue
self.resolved_deps[i].append(resolved)
i += 1
def has_use(self, name: str) -> bool:
return name in self.useflags or name in self.ebuild.iuse
def get_use(self, name: str) -> UseFlag:
if name not in self.useflags:
return self.ebuild.iuse[name]
return self.useflags[name]
def add_use(self, name: str, value: bool):
"""
Add a non-existent useflag
:param name: name of useflag
:param value: default value
:return:
"""
self.useflags[name] = UseFlag(name, value)
def add_use_requirement(self, name: str, flag: AtomUseT):
"""
Select a use flag required by an atom
:param name: name of the atom flag
:param flag: atom flag setting
:return:
"""
use = self.get_use(name)
self.add_use_hook(use, UseSelection(self, use, flag))
def add_use_hook(self, useflag: Optional[UseFlag], hook: Hookable):
"""
Associate an action with a flag changing
:param useflag: useflag to run hook on, None for all useflags
:param hook: action to take when useflag is changed
:return:
"""
if useflag is None:
self.global_flag_hooks.append(hook)
hook.run_hook(None)
return
if useflag.name not in self.use_flag_hooks:
self.use_flag_hooks[useflag.name] = []
self.use_flag_hooks[useflag.name].append(hook)
hook.run_hook(self.get_use(useflag.name).state)
def schedule_use(self, useflag: UseFlag):
"""
Update a useflag upon regeneration
:param useflag: useflag to update
:return: None
"""
self.flags.put(useflag)
self._is_dirty = True
def flush_use(self):
"""
Update all of the buffered useflags
and run their change hooks.
:return: None
"""
while not self.flags.empty():
useflag = self.flags.get()
# Only run the hooks if there is a change in state
if self.get_use(useflag.name).state != useflag.state:
self.useflags[useflag.name] = useflag
# Run all of the use-hooks for this flag
if useflag.name in self.use_flag_hooks:
for hook in self.use_flag_hooks[useflag.name]:
hook.run_hook(useflag.state)
# Run global use-hooks
for hook in self.global_flag_hooks:
hook.run_hook(None)
def __hash__(self):
return id(self)
def __repr__(self):
return "SelectedEbuild<%s %s>" % (self.ebuild.key, id(self))
class InstallPackage:
key: str
selected_ebuild_slots: Dict[str, SelectedEbuild]
resolve_session: "PackageResolutionSession"
def __init__(self, key: str, resolve_session: "PackageResolutionSession"):
self.key = key
self.selected_ebuild_slots = {}
self.resolve_session = resolve_session
def match_atom(
self, parent: Optional["SelectedEbuild"], atom: Atom, ebuild: Ebuild
) -> SelectedEbuild:
for slot in self.selected_ebuild_slots:
if atom.matches(self.selected_ebuild_slots[slot].ebuild):
return self.selected_ebuild_slots[slot]
# Check if slot has already been selected
if ebuild.slot is not None and ebuild.slot in self.selected_ebuild_slots:
# See if this atom matches the selected ebuild
sel_ebuild = self.selected_ebuild_slots[ebuild.slot]
if atom.matches(sel_ebuild.ebuild):
return sel_ebuild # We're good
# See if we can change the selected ebuild to match this atom
if sel_ebuild.change_within_slot(atom):
return sel_ebuild # Works!
else:
raise NotImplementedError("Cannot split a slot into multi-slot yet!")
elif ebuild.slot is not None:
# See if this atom matches any of the currently scheduled slots
for key, sel_ebuild in self.selected_ebuild_slots.items():
if atom.matches(sel_ebuild.ebuild):
return sel_ebuild
# We need to create a new selected ebuild and add it here
return self.add_atom(parent, atom, ebuild)
def add_atom(
self, parent: Optional["SelectedEbuild"], atom: Atom, ebuild: Ebuild
) -> SelectedEbuild:
"""
Add a SelectedEbuild in its slot
:param parent: parent package that triggered this
:param atom: atom that selected with ebuild
:param ebuild: ebuild selected by atom
:return: SelectedEbuild generated from the atom+ebuild
"""
sel_ebuild = SelectedEbuild(parent, atom, ebuild, self.resolve_session)
self.selected_ebuild_slots[sel_ebuild.ebuild.slot] = sel_ebuild
return sel_ebuild
class Emerge:
portage: Portage
selected_packages: Dict[str, InstallPackage]
blocks: Dict[str, List[Atom]]
resolve_session: "PackageResolutionSession"
def __init__(self, resolve_session: "PackageResolutionSession"):
self.portage = get_portage()
self.selected_packages = {}
self.blocks = {}
self.resolve_session = resolve_session
def add_block(self, parent: Optional["SelectedEbuild"], atom: Atom):
"""
        Block ebuilds matching this atom from being selected
:param parent: ebuild that selected this block
:param atom: atom to block
:return:
"""
if atom.key not in self.blocks:
self.blocks[atom.key] = []
self.blocks[atom.key].append(atom)
def select_atom(
self, parent: Optional["SelectedEbuild"], atom: Atom
) -> SelectedEbuild:
ebuild = self.portage.match_atom(atom)
if ebuild is None:
raise ResolutionException("No ebuild to match '%s' could be found" % atom)
if ebuild.package_key in self.selected_packages:
# Ebuild with this key has already been selected
# See if we can match this to an existing slot
install_pkg = self.selected_packages[ebuild.package_key]
return install_pkg.match_atom(parent, atom, ebuild)
else:
pkg = InstallPackage(atom.key, self.resolve_session)
self.selected_packages[pkg.key] = pkg
return pkg.add_atom(parent, atom, ebuild)
class PackageResolutionSession:
current_resolution: Set[SelectedEbuild]
def __init__(self):
self.current_resolution = set()
def check_resolved(self, ebuild: SelectedEbuild) -> bool:
return ebuild in self.current_resolution
def add_to_session(self, ebuild):
self.current_resolution.add(ebuild)
def clear(self):
self.current_resolution.clear()
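# Hedged end-to-end sketch (comments only; Atom construction details and
# the get_portage() backend are assumptions not defined in this excerpt):
#
#   session = PackageResolutionSession()
#   emerge = Emerge(session)
#   sel = emerge.select_atom(None, Atom('dev-lang/python'))
#   session.clear()
#   sel.get_resolved()   # walks BDEPEND/DEPEND/RDEPEND/PDEPEND recursively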
| gpl-3.0 | 1,697,455,195,590,679,300 | 31.940751 | 88 | 0.567624 | false |
rochacbruno/dynaconf | dynaconf/vendor/ruamel/yaml/scalarfloat.py | 1 | 2326 | from __future__ import print_function,absolute_import,division,unicode_literals
_B=False
_A=None
import sys
from .compat import no_limit_int
from .anchor import Anchor
if _B:from typing import Text,Any,Dict,List
__all__=['ScalarFloat','ExponentialFloat','ExponentialCapsFloat']
class ScalarFloat(float):
def __new__(D,*E,**A):
F=A.pop('width',_A);G=A.pop('prec',_A);H=A.pop('m_sign',_A);I=A.pop('m_lead0',0);J=A.pop('exp',_A);K=A.pop('e_width',_A);L=A.pop('e_sign',_A);M=A.pop('underscore',_A);C=A.pop('anchor',_A);B=float.__new__(D,*E,**A);B._width=F;B._prec=G;B._m_sign=H;B._m_lead0=I;B._exp=J;B._e_width=K;B._e_sign=L;B._underscore=M
if C is not _A:B.yaml_set_anchor(C,always_dump=True)
return B
def __iadd__(A,a):return float(A)+a;B=type(A)(A+a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __ifloordiv__(A,a):return float(A)//a;B=type(A)(A//a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __imul__(A,a):return float(A)*a;B=type(A)(A*a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;B._prec=A._prec;return B
def __ipow__(A,a):return float(A)**a;B=type(A)(A**a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __isub__(A,a):return float(A)-a;B=type(A)(A-a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
@property
def anchor(self):
A=self
if not hasattr(A,Anchor.attrib):setattr(A,Anchor.attrib,Anchor())
return getattr(A,Anchor.attrib)
def yaml_anchor(A,any=_B):
if not hasattr(A,Anchor.attrib):return _A
if any or A.anchor.always_dump:return A.anchor
return _A
def yaml_set_anchor(A,value,always_dump=_B):A.anchor.value=value;A.anchor.always_dump=always_dump
def dump(A,out=sys.stdout):out.write('ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, s:{})\n'.format(A,A._width,A._prec,A._m_sign,A._m_lead0,A._underscore,A._exp,A._e_width,A._e_sign))
class ExponentialFloat(ScalarFloat):
def __new__(A,value,width=_A,underscore=_A):return ScalarFloat.__new__(A,value,width=width,underscore=underscore)
class ExponentialCapsFloat(ScalarFloat):
	def __new__(A,value,width=_A,underscore=_A):return ScalarFloat.__new__(A,value,width=width,underscore=underscore)
 | mit | -8,955,087,405,345,137,000 | 69.515152 | 311 | 0.677988 | false |
Wen777/beets | beetsplug/freedesktop.py | 1 | 2206 | # This file is part of beets.
# Copyright 2014, Matt Lichtenberg.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Creates freedesktop.org-compliant .directory files on an album level.
"""
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.ui import decargs
import os
import logging
log = logging.getLogger('beets.freedesktop')
def process_query(lib, opts, args):
for album in lib.albums(decargs(args)):
process_album(album)
def process_album(album):
albumpath = album.item_dir()
if album.artpath:
fullartpath = album.artpath
artfile = os.path.split(fullartpath)[1]
create_file(albumpath, artfile)
else:
log.debug(u'freedesktop: album has no art')
def create_file(albumpath, artfile):
file_contents = "[Desktop Entry]\nIcon=./" + artfile
outfilename = os.path.join(albumpath, ".directory")
if not os.path.exists(outfilename):
file = open(outfilename, 'w')
file.write(file_contents)
file.close()
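# For an album directory /music/Artist/Album whose art file is "cover.jpg",
# create_file writes /music/Artist/Album/.directory containing (illustrative):
#
#   [Desktop Entry]
#   Icon=./cover.jpg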
class FreedesktopPlugin(BeetsPlugin):
def __init__(self):
super(FreedesktopPlugin, self).__init__()
self.config.add({
'auto': False
})
self.register_listener('album_imported', self.imported)
def commands(self):
freedesktop_command = Subcommand("freedesktop",
help="Create .directory files")
freedesktop_command.func = process_query
return [freedesktop_command]
def imported(self, lib, album):
automatic = self.config['auto'].get(bool)
if not automatic:
return
process_album(album)
| mit | 3,143,787,163,430,786,000 | 30.070423 | 72 | 0.677244 | false |
angstwad/linky | linky/bookmarklet.py | 1 | 1564 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Paul Durivage <[email protected]>
#
# This file is part of linky.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
def bookmarklet(url):
enc_url = urllib.quote("'%s'" % url)
bkm_str = "javascript:(function()%%7Bfunction%%20callback()%%7B(function(%%24)%%7Bvar%%20jQuery%%3D%%24%%3B%%24.ajax(%%7Burl%%3A%%20%s%%2Ctype%%3A%%20'post'%%2Cdata%%3A%%20%%7Burl%%3A%%20document.URL%%2Ctitle%%3A%%20document.title%%7D%%2Csuccess%%3A%%20function(data%%2C%%20status)%%20%%7Bconsole.log(data)%%7D%%2Cerror%%3A%%20function(data%%2C%%20status)%%20%%7Bconsole.log(data)%%7D%%7D)%%7D)(jQuery.noConflict(true))%%7Dvar%%20s%%3Ddocument.createElement(%%22script%%22)%%3Bs.src%%3D%%22https%%3A%%2F%%2Fajax.googleapis.com%%2Fajax%%2Flibs%%2Fjquery%%2F1.7.1%%2Fjquery.min.js%%22%%3Bif(s.addEventListener)%%7Bs.addEventListener(%%22load%%22%%2Ccallback%%2Cfalse)%%7Delse%%20if(s.readyState)%%7Bs.onreadystatechange%%3Dcallback%%7Ddocument.body.appendChild(s)%%3B%%7D)()" % enc_url
return bkm_str
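# Illustrative use (hypothetical endpoint): bookmarklet('http://localhost:5000/add')
# returns a "javascript:(function(){...})()" URL; saved as a browser bookmark,
# clicking it loads jQuery from the Google CDN and POSTs the current page's
# URL and title to the quoted endpoint.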
| apache-2.0 | 1,377,445,842,668,371,200 | 59.153846 | 787 | 0.725064 | false |
dokipen/trac-announcer-plugin | announcer/util/mail.py | 1 | 3241 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2009, Robert Corsaro
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from base64 import b32encode, b32decode
try:
from email.header import Header
except:
from email.Header import Header
MAXHEADERLEN = 76
def next_decorator(event, message, decorates):
"""
Helper method for IAnnouncerEmailDecorators. Call the next decorator
or return.
"""
if decorates and len(decorates) > 0:
next = decorates.pop()
return next.decorate_message(event, message, decorates)
def set_header(message, key, value, charset=None):
if not charset:
charset = message.get_charset() or 'ascii'
value = Header(value, charset, MAXHEADERLEN-(len(key)+2))
if message.has_key(key):
message.replace_header(key, value)
else:
message[key] = value
return message
def uid_encode(projurl, realm, target):
"""
Unique identifier used to track resources in relation to emails.
    projurl is included to avoid Message-ID collisions. Returns a
    base32-encoded UID string.
Set project_url in trac.ini for proper results.
"""
if hasattr(target, 'id'):
id = str(target.id)
elif hasattr(target, 'name'):
id = target.name
else:
id = str(target)
uid = ','.join((projurl, realm, id))
return b32encode(uid)
def uid_decode(encoded_uid):
"""
    Returns a list of projurl, realm and id.
"""
uid = b32decode(encoded_uid)
return uid.split(',')
def msgid(uid, host='localhost'):
"""
Formatted id for email headers.
ie. <UIDUIDUIDUIDUID@localhost>
"""
return "<%s@%s>"%(uid, host)
| bsd-3-clause | -2,618,730,130,924,297,700 | 35.829545 | 79 | 0.685591 | false |
HaroldMills/Vesper | vesper/signal/wave_audio_file.py | 1 | 6118 | """Module containing class `WaveAudioFileType`."""
import os.path
import wave
import numpy as np
from vesper.signal.audio_file_reader import AudioFileReader
from vesper.signal.unsupported_audio_file_error import UnsupportedAudioFileError
'''
audio_file_utils:
read_audio_file(file_path)
write_audio_file(file_path, waveform)
class AudioFileType:
name
reader_class
writer_class
is_recognized_file(file_path)
class AudioFileReader:
file_type
num_channels
length
sample_rate
dtype
read(start_index=0, length=None, samples=None)
close()
class AudioFileWriter:
file_type
num_channels
length
sample_rate
dtype
append(samples)
close()
class WaveFileReader(AudioFileReader):
__init__(file_path)
class WaveFileWriter(AudioFileWriter):
__init__(file_path, num_channels, sample_rate, dtype=None)
'''
class WaveAudioFileReader(AudioFileReader):
def __init__(self, file_, mono_1d=False):
"""
Initializes this file reader for the specified file.
`file_` may be either a string or a file-like object. If it is a
string it should be the path of a WAV file. If it is a file-like
object, its contents should be a WAV file.
"""
if isinstance(file_, str):
# `file_` is a file path
file_path = file_
if not os.path.exists(file_path):
raise ValueError('File "{}" does not exist.'.format(file_path))
if not WaveAudioFileType.is_supported_file(file_path):
raise UnsupportedAudioFileError(
'File "{}" does not appear to be a WAV file.'.format(
file_path))
self._name = 'WAV file "{}"'.format(file_path)
else:
# `file_` is a file-like object
file_path = None
self._name = 'WAV file'
try:
self._reader = wave.open(file_, 'rb')
except:
raise OSError('Could not open {}.'.format(self._name))
try:
(num_channels, sample_width, sample_rate, length, compression_type,
compression_name) = self._reader.getparams()
except:
self._reader.close()
raise OSError('Could not read metadata from {}.'.format(self._name))
sample_size = 8 * sample_width
if compression_type != 'NONE':
raise UnsupportedAudioFileError((
'{} appears to contain compressed data (with '
'compression name "{}"), which is not '
'supported.').format(self._name, compression_name))
# TODO: support additional sample sizes, especially 24 bits.
if sample_size != 8 and sample_size != 16:
raise UnsupportedAudioFileError((
'{} contains {}-bit samples, which are '
'not supported.').format(self._name, sample_size))
if sample_size == 8:
dtype = np.uint8 # unsigned as per WAVE file spec
else:
dtype = np.dtype('<i2')
super().__init__(
file_path, WaveAudioFileType, num_channels, length, sample_rate,
dtype, mono_1d)
def read(self, start_index=0, length=None):
if self._reader is None:
raise OSError('Cannot read from closed {}.'.format(self._name))
if start_index < 0 or start_index > self.length:
raise ValueError((
'Read start index {} is out of range [{}, {}] for '
'{}.').format(start_index, 0, self.length, self._name))
if length is None:
# no length specified
length = self.length - start_index
else:
# length specified
stop_index = start_index + length
if stop_index > self.length:
# stop index exceeds file length
raise ValueError((
'Read stop index {} implied by start index {} and read '
'length {} exceeds file length {} for {}.').format(
stop_index, start_index, length, self.length,
self._name))
try:
self._reader.setpos(start_index)
except:
self._reader.close()
raise OSError(
                'Setting the read position failed for {}.'.format(self._name))
try:
buffer = self._reader.readframes(length)
except:
self._reader.close()
raise OSError('Samples read failed for {}.'.format(self._name))
samples = np.frombuffer(buffer, dtype=self.dtype)
if len(samples) != length * self.num_channels:
raise OSError(
                'Got an unexpected number of samples from read of {}.'.format(
self._name))
if self.num_channels == 1 and self.mono_1d:
samples = samples.reshape((length,))
else:
samples = samples.reshape((length, self.num_channels)).transpose()
# TODO: Deinterleave samples?
# TODO: Byte swap samples on big endian platforms?
return samples
def close(self):
if self._reader is not None:
self._reader.close()
self._reader = None
class WaveAudioFileType:
name = 'WAV Audio File Type'
reader_class = WaveAudioFileReader
# writer_class = WaveAudioFileWriter
file_name_extensions = frozenset(['.wav', '.WAV'])
@staticmethod
def is_supported_file(file_path):
extension = os.path.splitext(file_path)[1]
return extension in WaveAudioFileType.file_name_extensions
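# Hedged usage sketch (not part of the original module): exercises the
# reader API described in the design notes above. The default file path is
# a placeholder, and the `sample_rate`/`length` attributes are assumed to
# be provided by the AudioFileReader base class.
if __name__ == '__main__':
    import sys
    path = sys.argv[1] if len(sys.argv) > 1 else 'example.wav'
    if not WaveAudioFileType.is_supported_file(path):
        print('Not a WAV file: {}'.format(path))
    else:
        reader = WaveAudioFileReader(path, mono_1d=True)
        try:
            # Read up to the first second of audio.
            n = min(reader.length, int(reader.sample_rate))
            samples = reader.read(0, n)
            print('Read {} of {} samples at {} Hz.'.format(
                samples.shape[-1], reader.length, reader.sample_rate))
        finally:
            reader.close()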
| mit | 6,694,778,441,508,051,000 | 27.588785 | 80 | 0.52746 | false |
editxt/editxt | editxt/test/command/test_pathfind.py | 1 | 4370 | # -*- coding: utf-8 -*-
# EditXT
# Copyright 2007-2016 Daniel Miller <[email protected]>
#
# This file is part of EditXT, a programmer's text editor for Mac OS X,
# which can be found at http://editxt.org/.
#
# EditXT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EditXT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EditXT. If not, see <http://www.gnu.org/licenses/>.
import os
from functools import partial
from os.path import isabs, join
from mocker import Mocker
from nose.plugins.skip import SkipTest
from editxt.test.util import eq_, gentest, test_app
import editxt.command.pathfind as mod
import editxt.config as config
from editxt.command.ag import is_ag_installed
from editxt.test.test_commands import CommandTester
def test_pathfind_title():
eq_(mod.pathfind.title, "Find file by path")
def test_pathfind():
if not is_ag_installed():
raise SkipTest("ag not installed")
filesystem = [
".git/file.txt", # excluded by default
"dir/a_file.txt",
"dir/file.pyc",
"dir/file.txt",
"dir/file_b.txt",
"dir/file/txt",
"file.doc",
"file.txt",
]
@gentest
@test_app("window project(/dir) editor*")
def test(app, command, files=None, selection=(0, 0)):
app.config.extend("ag", {
"path": config.String("ag"),
"options": config.String(""),
})
tmp = test_app(app).tmp
os.mkdir(join(tmp, ".git"))
os.mkdir(join(tmp, "dir"))
os.mkdir(join(tmp, "dir", "file"))
for path in filesystem:
assert not isabs(path), path
with open(join(tmp, path), "w") as fh:
fh.write(" ")
with open(join(tmp, ".gitignore"), "w") as fh:
fh.write("*.pyc\n")
if command:
parts = command.split(' ')
if len(parts) > 2:
if parts[2].startswith("/"):
parts[2] = join(tmp, parts[2].lstrip("/"))
assert all(p.startswith(tmp + "/")
for p in parts if p.startswith("/")), parts
command = " ".join(parts)
print(command)
editor = app.windows[0].current_editor
if editor.document is not None:
editor.document.text_storage[:] = "from file.txt import txt"
m = Mocker()
view = editor.text_view = m.mock()
(view.selectedRange() << selection).count(0, 4)
with m:
bar = CommandTester(mod.pathfind, editor=editor, output=True)
bar(command)
output = bar.output
if files is None:
assert output is None, output
else:
if output is not None:
output = output.replace(tmp + "/", "/")
expect = " ".join("<a href='xt://open/{0}{1}'>{1}</a>".format(
"/" if f.startswith("/") else "/dir/",
f[1:] if f.startswith("/") else f,
) for f in files)
eq_(" ".join(sorted(x for x in output.split("<br />") if x)), expect)
file_txt = ["a_file.txt", "file.txt", "file/txt"]
# simulate invoke with hotkey
yield test(None, file_txt, selection=(5, 8))
yield test("pathfind", file_txt, selection=(5, 8))
base_test = test
for cfg in [None, "window project(/dir)* editor"]:
test = base_test if cfg is None else partial(base_test, init_state=cfg)
yield test("pathfind file.txt", file_txt)
yield test("pathfind file\\.txt", ["a_file.txt", "file.txt"])
yield test("pathfind file\\. /", [
"/dir/a_file.txt",
"/dir/file.txt",
"/file.doc",
"/file.txt",
])
yield test("pathfind file\\. / unrestricted", [
"/.git/file.txt",
"/dir/a_file.txt",
"/dir/file.pyc",
"/dir/file.txt",
"/file.doc",
"/file.txt",
])
| gpl-3.0 | -5,833,606,073,200,345,000 | 33.409449 | 81 | 0.566133 | false |
asshinator/CodeScraps | utilities/timinator.py | 1 | 1604 | """This module is used to time the execution of other modules,
and is executed through tasks.json"""
import sys
import timeit
import cProfile
if len(sys.argv) < 2:
raise AssertionError("NoScript specified to time!")
elif ".py" not in sys.argv[1]:
print(str(sys.argv[1])+ " is not a python Script!")
exit(1)
scriptToExecute = sys.argv[1]
numberOfIterations = int(sys.argv[2]) if len(sys.argv) > 2 else 100
if "timinator" not in scriptToExecute:
#Construct the code block to execute the string.
stringerino = r'exec(open(r'
stringerino += "'"
stringerino += scriptToExecute
stringerino += "'"
stringerino += r').read())'
#Print out the name of the script we're executing
print("ScriptName:\t"+sys.argv[1])
#Mention what the script outputs, so we can always use this.
print("PythonSnippet:\t"+stringerino)
print("Execution:\t",end='')
out = exec(stringerino)
if out != None:
print(str(out))
#mention Execution time.
baseExecutionTime = timeit.timeit(stringerino, number=numberOfIterations)
print("Execution time over "+str(numberOfIterations)+" runs:\t"
+str(baseExecutionTime))
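    # The timed script is expected to check a global `isNullRun` flag
    # (injected via timeit's setup argument below) and skip its real work
    # when set, so the null run measures only exec/interpreter overhead.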
nullExecutionTime = timeit.timeit(stringerino,"isNullRun=True",number=numberOfIterations)
print("Null Execution time over "+str(numberOfIterations)+" runs:\t"
+str(nullExecutionTime))
deltaExecutionTime = (baseExecutionTime - nullExecutionTime) / numberOfIterations
print("average delta execution time:"+str(deltaExecutionTime))
else:
print("timinator can't time itself. That's a conundrum!")
| mit | 1,828,312,055,412,470,800 | 37.214286 | 93 | 0.696384 | false |
rjw57/openni-skeleton-export | examples/labelbones.py | 1 | 4166 | #!/usr/bin/env python
#
# An example script for extracting labelled images by associating points with
# their closest bone.
"""
Usage:
labelbones.py (-h | --help)
labelbones.py [--verbose] <logfile> <frame-prefix>
Options:
-h, --help Show a brief usage summary.
-v, --verbose Increase verbosity of output.
"""
import logging
import docopt
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import tables
LOG = logging.getLogger()
def main():
"""Main entry point."""
opts = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if opts['--verbose'] else logging.WARN
)
LOG.info('Opening log file {0}'.format(opts['<logfile>']))
log_root = tables.open_file(opts['<logfile>']).root
for frame in log_root.frames:
frame_idx = frame._v_attrs.idx
if frame_idx % 30 == 0:
LOG.info('Processing frame {0}...'.format(frame_idx))
user = None
for tracked_user in frame.users:
try:
if tracked_user.joints.shape[0] > 0:
user = tracked_user
except AttributeError:
# it's ok
pass
# If we have a user, detect labels
if user is None:
label_im = np.asanyarray(frame.label)
else:
label_im = bone_labels(frame, user)
label_im = label_im / float(max(1.0, label_im.max()))
label_color_im = (plt.cm.jet(label_im)[...,:3] * 255).astype(np.uint8)
Image.fromarray(label_color_im).save(
'{0}-{1:05d}.png'.format(opts['<frame-prefix>'], frame_idx))
def distances_to_line_segment(pts, line):
"""pts is a Nx3 array of 3d points.
line = (p1, p2) where p1 and p2 are 3-vectors.
"""
p1, p2 = line
p1, p2 = np.asarray(p1), np.asarray(p2)
# Let bone line be a + t * n
# Compute n
n = p2 - p1
line_len = np.sqrt(np.sum(n**2))
n /= line_len
# Compute points using p1 and p2 as origin
# Note, x = p - a
x, y = np.copy(pts), np.copy(pts)
for i in range(3):
x[:,i] -= p1[i]
y[:,i] -= p2[i]
# Squared distances to p1 and p2
d1 = np.sum(x**2, axis=1)
d2 = np.sum(y**2, axis=1)
# Compute t = (p - a) . n
xdotn = np.copy(x)
for i in range(3):
xdotn[:,i] *= n[i]
xdotn = np.sum(xdotn, axis=1)
# Compute squared distance to line
dl = np.zeros_like(xdotn)
for i in range(3):
dl += (x[:,i] - xdotn * n[i]) ** 2
# Compute length along line
norm_len = xdotn / line_len
# Which distance should we use?
d = np.where(norm_len < 0, d1, np.where(norm_len > 1, d2, dl))
return np.sqrt(d)
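# Quick sanity example (comments): with line = ((0,0,0), (1,0,0)), the point
# (0.5, 1, 0) projects inside the segment (norm_len = 0.5) and gets the
# perpendicular distance 1.0, while (2, 0, 0) has norm_len = 2 > 1 and gets
# its distance to the p2 endpoint, also 1.0.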
def bone_labels(frame, user):
# Get points for this user
pts = frame.points[:]
pt_labels = frame.point_labels[:]
user_pts = pts[pt_labels == user._v_attrs.idx, :]
joint_map = {}
for joint in user.joints:
joint_map[joint['id']] = (joint['x'], joint['y'], joint['z'])
# Get bones
bones = dict(
neck = (1,2),
left_forearm = (9,7), left_arm = (7,6),
right_forearm = (13,15), right_arm = (12,13),
left_chest = (6,17), right_chest = (12,21),
left_thigh = (17,18), left_calf = (18,20),
right_thigh = (21,22), right_calf = (22,24),
left_collar = (2,6), right_collar = (2,12),
# chest = (2,3)
)
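    # The numeric ids above are assumed to follow the OpenNI skeleton joint
    # enumeration (e.g. 1 = head, 2 = neck, 6/12 = shoulders, 7/13 = elbows,
    # 9/15 = hands, 17/21 = hips, 18/22 = knees, 20/24 = feet).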
bone_lines = {}
for bone_name, bone_joints in bones.items():
j1, j2 = bone_joints
if j1 not in joint_map or j2 not in joint_map:
continue
j1_loc, j2_loc = tuple(joint_map[j] for j in (j1,j2))
bone_lines[bone_name] = np.array((j1_loc, j2_loc))
bone_names = sorted(bone_lines.keys())
bone_dists = np.zeros((user_pts.shape[0], len(bone_names)))
for i, n in enumerate(bone_names):
bone_dists[:,i] = distances_to_line_segment(user_pts, bone_lines[n])
closest_bone_indices = np.argmin(bone_dists, axis=1)
label_image = np.zeros_like(frame.depth)
label_image[frame.label == user._v_attrs.idx] = closest_bone_indices + 1
return label_image
if __name__ == '__main__':
main()
| apache-2.0 | -3,595,863,162,251,792,400 | 27.930556 | 78 | 0.56097 | false |
malept/gmusicprocurator | docs/conf.py | 1 | 12282 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import json
import os
import re
import sys
THIS_DIR = os.path.abspath('.')
BASE_DIR = os.path.abspath('..')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, BASE_DIR)
bower_metadata = json.load(open(os.path.join(BASE_DIR, 'bower.json')))
npm_metadata = json.load(open(os.path.join(BASE_DIR, 'package.json')))
def setup(app):
app.add_config_value('readthedocs', False, True)
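# The config value registered above can be queried from the documentation
# source via the already-enabled sphinx.ext.ifconfig extension, e.g.
# (illustrative reST):
#
#   .. ifconfig:: readthedocs
#
#      This build was produced on Read the Docs.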
readthedocs = os.environ.get('READTHEDOCS') == 'True'
if readthedocs:
os.environ['GMUSICPROCURATOR_SETTINGS'] = 'default_settings.py'
# -- General configuration ----------------------------------------------------
AUTHORS = u', '.join(bower_metadata['authors'])
TITLE = u'GMusicProcurator'
LONG_TITLE = u'{0} Documentation'.format(TITLE)
SUMMARY = bower_metadata['description']
SHORT_COPYRIGHT = u'2014, {0}. Some Rights Reserved.'.format(AUTHORS)
COPYRIGHT = u'''{0}
This work is licensed under a
Creative Commons Attribution-ShareAlike 4.0
International License'''.format(SHORT_COPYRIGHT)
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.autohttp.flask',
]
if not readthedocs:
extensions += [
'sphinxcontrib.coffeedomain',
]
try:
import rst2pdf
except ImportError:
rst2pdf = None
if rst2pdf:
extensions.append('rst2pdf.pdfbuilder')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = TITLE
copyright = COPYRIGHT
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = re.match(r'\d+\.\d+', npm_metadata['version']).group(0)
# The full version, including alpha/beta/rc tags.
release = npm_metadata['version']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
if readthedocs:
exclude_patterns += [
'coffeescript.rst',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# intersphinx extension
intersphinx_mapping = {
'py': ('http://docs.python.org/2.7/', None)
}
mdn_inv = os.path.join(THIS_DIR, 'mdn-js-objects.inv')
bb_inv = os.path.join(THIS_DIR, 'backbone.inv')
if not readthedocs:
if os.path.exists(mdn_inv):
mdn = 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/'
intersphinx_mapping['js'] = (mdn, mdn_inv)
if os.path.exists(bb_inv):
intersphinx_mapping['backbone'] = ('http://backbonejs.org/', bb_inv)
# coffeedomain extension
coffee_src_dir = os.path.join(BASE_DIR, 'gmusicprocurator', 'static', 'cs')
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html',
'copyright_sidebar.html',
],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GMusicProcuratordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GMusicProcurator.tex', LONG_TITLE, AUTHORS, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gmusicprocurator', LONG_TITLE, [AUTHORS], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GMusicProcurator', LONG_TITLE, AUTHORS,
'GMusicProcurator', SUMMARY, 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = TITLE
epub_author = AUTHORS
epub_publisher = AUTHORS
epub_copyright = COPYRIGHT
# The basename for the epub file. It defaults to the project name.
# epub_basename = TITLE
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to
# save visual space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
('index', u'gmusicprocurator', TITLE, AUTHORS),
]
| gpl-3.0 | 5,358,588,643,516,297,000 | 29.552239 | 82 | 0.693535 | false |
micahflee/onionshare | desktop/src/onionshare/tab/mode/receive_mode/__init__.py | 1 | 15632 | # -*- coding: utf-8 -*-
"""
OnionShare | https://onionshare.org/
Copyright (C) 2014-2021 Micah Lee, et al. <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from PySide2 import QtCore, QtWidgets, QtGui
from onionshare_cli.web import Web
from ..history import History, ToggleHistory, ReceiveHistoryItem
from .. import Mode
from .... import strings
from ....widgets import MinimumSizeWidget, Alert
from ....gui_common import GuiCommon
class ReceiveMode(Mode):
"""
Parts of the main window UI for receiving files.
"""
def init(self):
"""
Custom initialization for ReceiveMode.
"""
# Create the Web object
self.web = Web(self.common, True, self.settings, "receive")
# Receive image
self.image_label = QtWidgets.QLabel()
self.image_label.setPixmap(
QtGui.QPixmap.fromImage(
QtGui.QImage(
GuiCommon.get_resource_path(
"images/{}_mode_receive.png".format(self.common.gui.color_mode)
)
)
)
)
self.image_label.setFixedSize(250, 250)
image_layout = QtWidgets.QVBoxLayout()
image_layout.addWidget(self.image_label)
self.image = QtWidgets.QWidget()
self.image.setLayout(image_layout)
# Settings
# Data dir
data_dir_label = QtWidgets.QLabel(
strings._("mode_settings_receive_data_dir_label")
)
self.data_dir_lineedit = QtWidgets.QLineEdit()
self.data_dir_lineedit.setReadOnly(True)
self.data_dir_lineedit.setText(self.settings.get("receive", "data_dir"))
data_dir_button = QtWidgets.QPushButton(
strings._("mode_settings_receive_data_dir_browse_button")
)
data_dir_button.clicked.connect(self.data_dir_button_clicked)
data_dir_layout = QtWidgets.QHBoxLayout()
data_dir_layout.addWidget(data_dir_label)
data_dir_layout.addWidget(self.data_dir_lineedit)
data_dir_layout.addWidget(data_dir_button)
self.mode_settings_widget.mode_specific_layout.addLayout(data_dir_layout)
# Disable text or files
        self.disable_text_checkbox = QtWidgets.QCheckBox()
        self.disable_text_checkbox.setChecked(
            bool(self.settings.get("receive", "disable_text"))
        )
self.disable_text_checkbox.clicked.connect(self.disable_text_checkbox_clicked)
self.disable_text_checkbox.setText(
strings._("mode_settings_receive_disable_text_checkbox")
)
        self.disable_files_checkbox = QtWidgets.QCheckBox()
        self.disable_files_checkbox.setChecked(
            bool(self.settings.get("receive", "disable_files"))
        )
self.disable_files_checkbox.clicked.connect(self.disable_files_checkbox_clicked)
self.disable_files_checkbox.setText(
strings._("mode_settings_receive_disable_files_checkbox")
)
disable_layout = QtWidgets.QHBoxLayout()
disable_layout.addWidget(self.disable_text_checkbox)
disable_layout.addWidget(self.disable_files_checkbox)
disable_layout.addStretch()
self.mode_settings_widget.mode_specific_layout.addLayout(disable_layout)
# Webhook URL
webhook_url = self.settings.get("receive", "webhook_url")
self.webhook_url_checkbox = QtWidgets.QCheckBox()
self.webhook_url_checkbox.clicked.connect(self.webhook_url_checkbox_clicked)
self.webhook_url_checkbox.setText(
strings._("mode_settings_receive_webhook_url_checkbox")
)
self.webhook_url_lineedit = QtWidgets.QLineEdit()
self.webhook_url_lineedit.editingFinished.connect(
self.webhook_url_editing_finished
)
self.webhook_url_lineedit.setPlaceholderText(
"https://example.com/post-when-file-uploaded"
)
webhook_url_layout = QtWidgets.QHBoxLayout()
webhook_url_layout.addWidget(self.webhook_url_checkbox)
webhook_url_layout.addWidget(self.webhook_url_lineedit)
if webhook_url is not None and webhook_url != "":
self.webhook_url_checkbox.setCheckState(QtCore.Qt.Checked)
self.webhook_url_lineedit.setText(
self.settings.get("receive", "webhook_url")
)
self.show_webhook_url()
else:
self.webhook_url_checkbox.setCheckState(QtCore.Qt.Unchecked)
self.hide_webhook_url()
self.mode_settings_widget.mode_specific_layout.addLayout(webhook_url_layout)
# Set title placeholder
self.mode_settings_widget.title_lineedit.setPlaceholderText(
strings._("gui_tab_name_receive")
)
# Server status
self.server_status.set_mode("receive")
self.server_status.server_started_finished.connect(self.update_primary_action)
self.server_status.server_stopped.connect(self.update_primary_action)
self.server_status.server_canceled.connect(self.update_primary_action)
# Tell server_status about web, then update
self.server_status.web = self.web
self.server_status.update()
# Upload history
self.history = History(
self.common,
QtGui.QPixmap.fromImage(
QtGui.QImage(
GuiCommon.get_resource_path("images/receive_icon_transparent.png")
)
),
strings._("gui_receive_mode_no_files"),
strings._("gui_all_modes_history"),
)
self.history.hide()
# Toggle history
self.toggle_history = ToggleHistory(
self.common,
self,
self.history,
QtGui.QIcon(GuiCommon.get_resource_path("images/receive_icon_toggle.png")),
QtGui.QIcon(
GuiCommon.get_resource_path("images/receive_icon_toggle_selected.png")
),
)
# Header
header_label = QtWidgets.QLabel(strings._("gui_new_tab_receive_button"))
header_label.setStyleSheet(self.common.gui.css["mode_header_label"])
# Receive mode warning
receive_warning = QtWidgets.QLabel(strings._("gui_receive_mode_warning"))
receive_warning.setMinimumHeight(80)
receive_warning.setWordWrap(True)
# Top bar
top_bar_layout = QtWidgets.QHBoxLayout()
top_bar_layout.addStretch()
top_bar_layout.addWidget(self.toggle_history)
# Main layout
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.addWidget(header_label)
self.main_layout.addWidget(receive_warning)
self.main_layout.addWidget(self.primary_action, stretch=1)
self.main_layout.addWidget(MinimumSizeWidget(525, 0))
# Row layout
content_row = QtWidgets.QHBoxLayout()
content_row.addLayout(self.main_layout)
content_row.addWidget(self.image)
row_layout = QtWidgets.QVBoxLayout()
row_layout.addLayout(top_bar_layout)
row_layout.addLayout(content_row, stretch=1)
row_layout.addWidget(self.server_status)
# Column layout
self.column_layout = QtWidgets.QHBoxLayout()
self.column_layout.addLayout(row_layout)
self.column_layout.addWidget(self.history, stretch=1)
# Wrapper layout
self.wrapper_layout = QtWidgets.QVBoxLayout()
self.wrapper_layout.addLayout(self.column_layout)
self.setLayout(self.wrapper_layout)
def get_type(self):
"""
Returns the type of mode as a string (e.g. "share", "receive", etc.)
"""
return "receive"
def data_dir_button_clicked(self):
"""
Browse for a new OnionShare data directory, and save to tab settings
"""
data_dir = self.data_dir_lineedit.text()
selected_dir = QtWidgets.QFileDialog.getExistingDirectory(
self, strings._("mode_settings_receive_data_dir_label"), data_dir
)
if selected_dir:
# If we're running inside a flatpak package, the data dir must be inside ~/OnionShare
if self.common.gui.is_flatpak:
if not selected_dir.startswith(os.path.expanduser("~/OnionShare")):
Alert(self.common, strings._("gui_receive_flatpak_data_dir"))
return
self.common.log(
"ReceiveMode",
"data_dir_button_clicked",
f"selected dir: {selected_dir}",
)
self.data_dir_lineedit.setText(selected_dir)
self.settings.set("receive", "data_dir", selected_dir)
def disable_text_checkbox_clicked(self):
self.settings.set(
"receive", "disable_text", self.disable_text_checkbox.isChecked()
)
def disable_files_checkbox_clicked(self):
self.settings.set(
"receive", "disable_files", self.disable_files_checkbox.isChecked()
)
def webhook_url_checkbox_clicked(self):
if self.webhook_url_checkbox.isChecked():
if self.settings.get("receive", "webhook_url"):
self.webhook_url_lineedit.setText(
self.settings.get("receive", "webhook_url")
)
self.show_webhook_url()
else:
self.settings.set("receive", "webhook_url", None)
self.hide_webhook_url()
def webhook_url_editing_finished(self):
self.settings.set("receive", "webhook_url", self.webhook_url_lineedit.text())
def hide_webhook_url(self):
self.webhook_url_lineedit.hide()
def show_webhook_url(self):
self.webhook_url_lineedit.show()
def get_stop_server_autostop_timer_text(self):
"""
Return the string to put on the stop server button, if there's an auto-stop timer
"""
return strings._("gui_receive_stop_server_autostop_timer")
def autostop_timer_finished_should_stop_server(self):
"""
The auto-stop timer expired, should we stop the server? Returns a bool
"""
# If there were no attempts to upload files, or all uploads are done, we can stop
if (
self.web.receive_mode.cur_history_id == 0
or not self.web.receive_mode.uploads_in_progress
):
self.server_status.stop_server()
self.server_status_label.setText(strings._("close_on_autostop_timer"))
return True
# An upload is probably still running - hold off on stopping the share, but block new shares.
else:
self.server_status_label.setText(
strings._("gui_receive_mode_autostop_timer_waiting")
)
self.web.receive_mode.can_upload = False
return False
def start_server_custom(self):
"""
Starting the server.
"""
# Reset web counters
self.web.receive_mode.cur_history_id = 0
self.web.reset_invalid_passwords()
# Hide and reset the uploads if we have previously shared
self.reset_info_counters()
# Set proxies for webhook URL
if self.common.gui.local_only:
self.web.proxies = None
else:
(socks_address, socks_port) = self.common.gui.onion.get_tor_socks_port()
self.web.proxies = {
"http": f"socks5h://{socks_address}:{socks_port}",
"https": f"socks5h://{socks_address}:{socks_port}",
}
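            # socks5h (as opposed to socks5) resolves the webhook hostname
            # through the Tor SOCKS proxy itself, so .onion webhook URLs
            # work and DNS lookups do not leak outside Tor.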
def start_server_step2_custom(self):
"""
Step 2 in starting the server.
"""
# Continue
self.starting_server_step3.emit()
self.start_server_finished.emit()
def handle_tor_broke_custom(self):
"""
Connection to Tor broke.
"""
self.primary_action.hide()
def handle_request_load(self, event):
"""
Handle REQUEST_LOAD event.
"""
self.system_tray.showMessage(
strings._("systray_page_loaded_title"),
strings._("systray_page_loaded_message"),
)
def handle_request_started(self, event):
"""
Handle REQUEST_STARTED event.
"""
item = ReceiveHistoryItem(
self.common,
event["data"]["id"],
event["data"]["content_length"],
)
self.history.add(event["data"]["id"], item)
self.toggle_history.update_indicator(True)
self.history.in_progress_count += 1
self.history.update_in_progress()
self.system_tray.showMessage(
strings._("systray_receive_started_title"),
strings._("systray_receive_started_message"),
)
def handle_request_progress(self, event):
"""
Handle REQUEST_PROGRESS event.
"""
self.history.update(
event["data"]["id"],
{"action": "progress", "progress": event["data"]["progress"]},
)
def handle_request_upload_includes_message(self, event):
"""
Handle REQUEST_UPLOAD_INCLUDES_MESSAGE event.
"""
self.history.includes_message(event["data"]["id"], event["data"]["filename"])
def handle_request_upload_file_renamed(self, event):
"""
Handle REQUEST_UPLOAD_FILE_RENAMED event.
"""
self.history.update(
event["data"]["id"],
{
"action": "rename",
"old_filename": event["data"]["old_filename"],
"new_filename": event["data"]["new_filename"],
},
)
def handle_request_upload_set_dir(self, event):
"""
Handle REQUEST_UPLOAD_SET_DIR event.
"""
self.history.update(
event["data"]["id"],
{
"action": "set_dir",
"filename": event["data"]["filename"],
"dir": event["data"]["dir"],
},
)
def handle_request_upload_finished(self, event):
"""
Handle REQUEST_UPLOAD_FINISHED event.
"""
self.history.update(event["data"]["id"], {"action": "finished"})
self.history.completed_count += 1
self.history.in_progress_count -= 1
self.history.update_completed()
self.history.update_in_progress()
def handle_request_upload_canceled(self, event):
"""
Handle REQUEST_UPLOAD_CANCELED event.
"""
self.history.update(event["data"]["id"], {"action": "canceled"})
self.history.in_progress_count -= 1
self.history.update_in_progress()
def on_reload_settings(self):
"""
We should be ok to re-enable the 'Start Receive Mode' button now.
"""
self.primary_action.show()
def reset_info_counters(self):
"""
Set the info counters back to zero.
"""
self.history.reset()
self.toggle_history.indicator_count = 0
self.toggle_history.update_indicator()
def update_primary_action(self):
self.common.log("ReceiveMode", "update_primary_action")
| gpl-3.0 | 5,165,836,029,262,839,000 | 35.269142 | 101 | 0.604273 | false |
YingYang/STFT_R_git_repo | MNE_stft/mne_stft_regression_individual_G.py | 1 | 12643 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 1 12:55:06 2014
@author: ying
"""
import mne
import numpy as np
#import matplotlib.pyplot as plt
from mne.minimum_norm.inverse import (apply_inverse, _check_method, _check_ori,
prepare_inverse_operator, _pick_channels_inverse_operator, _check_ch_names,
_assemble_kernel, combine_xyz)
from mne.io.constants import FIFF
from mne.time_frequency import stft, istft
import numpy.linalg as la
# ============================================================================
def _apply_inverse_evoked_list(evoked_list, inverse_operator, lambda2, method="MNE",
labels=None, nave=1, pick_ori=None,
verbose=None, pick_normal=None):
""" Utility function for applying the inverse solution to a list of evoked object
Assume that the info for each evoked object in the list is the same
Input:
evoked_list,
inverse_operator,
lambda2,
method,
labels, list of label objects
nave = 1,
pick_ori = None,
verbos = none,
pick_normal = None
Output: stc_Data, [n_sources_labels, n_times, n_trials]
"""
info = evoked_list[0].info
method = _check_method(method)
pick_ori = _check_ori(pick_ori)
_check_ch_names(inverse_operator, info)
inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
sel = _pick_channels_inverse_operator(info['ch_names'], inv)
labels_union = None
if labels is not None:
labels_union = labels[0]
if len(labels) > 1:
for i in range(1,len(labels)):
labels_union += labels[i]
K, noise_norm, vertno = _assemble_kernel(inv, labels_union, method, pick_ori)
is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
and pick_ori is None)
if not is_free_ori and noise_norm is not None:
# premultiply kernel with noise normalization
K *= noise_norm
n_channels = len(sel)
n_times = len(evoked_list[0].times)
n_trials = len(evoked_list)
n_sources = K.shape[0]
stc_Data = np.zeros([n_sources,n_times, n_trials])
for i in range(n_trials):
if is_free_ori:
# Compute solution and combine current components (non-linear)
sol = np.dot(K, evoked_list[i].data) # apply imaging kernel
if is_free_ori:
sol = combine_xyz(sol)
if noise_norm is not None:
sol *= noise_norm
else:
# Linear inverse: do computation here or delayed
sol = np.dot(K, evoked_list[i].data)
stc_Data[:,:,i] = sol
return stc_Data
# ===========================================================================
def mne_stft_regression_individual_G(evoked_list, inverse_operator_list, G_ind, X,
labels = None, pick_ori=None, pick_normal=None,
snr=1, wsize = 16, tstep = 4, Flag_reg_stats = False,
method = "MNE"):
''' Get the MNE solution for a given snr(lambda value)
Input:
evoked_list, a list of evoked instances
inverse_operator, the inverse operator for MNE
X, [n_trials, p] array
labels, ROI labels list, if None, use the whole brain
snr, controls lambda
wsize, window size of the stft transform
tstep, time step of the stft transform
method, "MNE", "dSPM", "sLORETA",
Note that dSPM and sLORETA can not be used for prediction,
and the coefficients are normalized too.
Output:
result_dict = dict(coef = coef, F = F, sel = sel,roi_data = roi_data)
['coef']: Regression coefficients, complex arrays [n_dipoles,n_coefs,n_steps,p]
['F'],F-statistics, complex arrays
['sel'], selction of the source points, columns of G
['roi_data'] the source data in the ROI
'''
n_trials = len(evoked_list)
sel = []
# The following line is wrong
n_dipoles = inverse_operator_list[0]['nsource']
# if label is specified, only do the regression on the labels
# otherwise, select the data for the whole brain.
if labels is not None:
for i in range(len(labels)):
_, sel_tmp = mne.source_space.label_src_vertno_sel(labels[i],
inverse_operator_list[0]['src'])
sel = np.hstack([sel, sel_tmp])
sel = sel.astype(np.int)
else:
sel = np.arange(0,n_dipoles,1)
sel.astype(np.int)
# tested, the result is the same as running apply_inverse()
n_run = len(np.unique(G_ind))
n_dipoles = len(sel)
n_times = evoked_list[0].data.shape[1]
roi_data = np.zeros([n_dipoles, n_times, n_trials])
n_trials = len(evoked_list)
for run_id in range(n_run):
tmp_evoked_list = [evoked_list[k] for k in range(n_trials) if G_ind[k] == run_id]
tmp = _apply_inverse_evoked_list(tmp_evoked_list, inverse_operator_list[run_id],
lambda2= 1.0/snr**2, method=method,
labels=labels, nave=1, pick_ori=pick_ori,
verbose=None, pick_normal=None)
roi_data[:,:, G_ind == run_id] = tmp
# stft transform, F means the coefficients
F_roi_data = list()
for i in range(n_trials):
F_roi_data.append(stft(roi_data[:,:,i], wsize= wsize, tstep = tstep))
# put the stft transform into a matrix
dim0,dim1,dim2 = F_roi_data[0].shape
F_roi_data_3d = np.zeros([dim0,dim1,dim2,n_trials],dtype = np.complex)
for i in range(n_trials):
F_roi_data_3d[:,:,:,i] = F_roi_data[i]
del(F_roi_data)
# regression, return coefficients and F-values
p = X.shape[1]
coef = np.zeros([dim0,dim1,dim2,p], dtype = np.complex)
F = np.zeros([dim0,dim1,dim2], dtype = np.complex) if Flag_reg_stats else None
linreg_op = np.dot(la.inv(X.T.dot(X)),X.T)
for i in range(dim0):
for j in range(dim1):
for k in range(dim2):
tmpY = np.real(F_roi_data_3d[i,j,k,:])
tmp_coef = linreg_op.dot(tmpY)
# debug
#tmp_coef2 = np.linalg.lstsq(X,tmpY)[0]
#print np.linalg.norm(tmp_coef-tmp_coef2)
coef[i,j,k,:] += tmp_coef
if Flag_reg_stats:
tmpY_hat = np.dot(X,tmp_coef)
tmp_res = tmpY_hat-tmpY
SSE = np.dot(tmp_res,tmp_res)
SST = np.sum((tmpY-np.mean(tmpY))**2)
if SSE== 0:
F[i,j,k] += 0
else:
F[i,j,k] += (SST-SSE)/(p-1)/(SSE/(n_trials-p))
# imaginary
tmpY = np.imag(F_roi_data_3d[i,j,k,:])
tmp_coef = linreg_op.dot(tmpY)
coef[i,j,k,:] += tmp_coef*1j
if Flag_reg_stats:
tmpY_hat = np.dot(X,tmp_coef)
tmp_res = tmpY_hat-tmpY
SSE = np.dot(tmp_res,tmp_res)
SST = np.sum((tmpY-np.mean(tmpY))**2)
if SSE== 0:
F[i,j,k] += 0
else:
F[i,j,k] += (SST-SSE)/(p-1)/(SSE/(n_trials-p))*1j
result_dict = dict(coef = coef, F = F, sel = sel,roi_data_3D = roi_data)
return result_dict
#===============================================================
def get_MSE_mne_stft_regression_individual_G(evoked_list, fwd_list, G_ind, X, coef, labels,
wsize = 16, tstep = 4):
'''
Use the mne regression coefficients to get predicted sensor data,
then abtain the sum of squared error
Input:
evoked_list, a list of evoked objects
fwd, the forward solution
X, the design matrix,
coef, the regression coefficients, [n_dipoles,n_coefs,n_steps,p]
wsize, STFT window size
tstep, STFT time step
Output:
MSE, the sum of squared error across trials
'''
sel = []
n_dipoles = fwd_list[0]['nsource']
if labels is not None:
for i in range(len(labels)):
_, sel_tmp = mne.source_space.label_src_vertno_sel(labels[i],fwd_list[0]['src'])
sel = np.hstack([sel, sel_tmp])
sel = sel.astype(np.int)
else:
sel = np.arange(0,n_dipoles,1)
sel.astype(np.int)
# prepair the forward solution
evoked_ch_names = evoked_list[0].info['ch_names']
fwd_ch_names = fwd_list[0]['info']['ch_names']
channel_sel = [i for i in range(len(fwd_ch_names)) \
if fwd_ch_names[i] in evoked_ch_names]
ntimes = len(evoked_list[0].times)
G_list = list()
n_run = len(np.unique(G_ind))
for run_id in range(n_run):
G = fwd_list[run_id]['sol']['data'][channel_sel,:]
G = G[:,sel]
G_list.append(G)
n_trials,p = X.shape
if n_trials != len(evoked_list):
raise ValueError("the numbers of trials do not match")
SSE = 0.0
for r in range(n_trials):
# STFT coefficients of current trial
#predicted_stft_coef = np.zeros(coef.shape[0:3], dtype = np.complex)
#for j in range(p):
# predicted_stft_coef += coef[:,:,:,j]*X[r,j]
predicted_stft_coef = np.sum(coef*X[r,:],axis = 3)
# istft
G = G_list[G_ind[r]]
predicted_sensor = G.dot(np.real(istft(predicted_stft_coef, tstep = tstep, Tx = ntimes)))
SSE += np.sum((evoked_list[r].data - predicted_sensor)**2)
MSE = SSE/(n_trials)
return MSE
# ==============================================================
def select_lambda_tuning_mne_stft_regression_cv_individual_G(evoked_list, inverse_operator_list,
fwd_list, G_ind, X, cv_partition_ind,
snr_tuning_seq,
labels = None,
wsize= 16, tstep = 4):
'''
Use cross-validation to select the best lambda (tuning snr values)
All source points across the whole brain must be used,
This may require a large membory
Input:
evoked_list, n_trials of evoked objects
inverse_operator, the inverse_operator,
fwd, the forward solution
X, [n_trials,p] the design matrix
cv_partition_ind, [n_trials,] parition index for cross validcation
snr_tuning_seq, a sequence of "snr" parameter
wsize, STFT window size
tstep, STFT time step
Output:
best_snr_tuning, the best snr paramter
cv_MSE, the cross validated SSE for each snr parameters
'''
n_fold = len(np.unique(cv_partition_ind))
# number of tuning paramters
n_par_tuning = len(snr_tuning_seq)
cv_MSE = np.ones([len(snr_tuning_seq),n_fold], dtype = np.float)*np.Inf
for j in range(n_fold):
# partition
test_trials = np.nonzero(cv_partition_ind == j)[0]
train_trials = np.nonzero(cv_partition_ind != j)[0]
evoked_list_train = [evoked_list[r] for r in range(len(evoked_list)) \
if r in train_trials]
Xtrain = X[train_trials,:]
evoked_list_test = [evoked_list[r] for r in range(len(evoked_list)) \
if r in test_trials]
Xtest = X[test_trials,:]
G_ind_train = G_ind[train_trials]
G_ind_test = G_ind[test_trials]
for i in range(n_par_tuning):
tmp_snr = snr_tuning_seq[i]
tmp_result = mne_stft_regression_individual_G(evoked_list_train, inverse_operator_list, G_ind_train,
Xtrain, labels = labels,
snr=tmp_snr, wsize = wsize, tstep = tstep)
coef = tmp_result['coef']
# Now do the prediction
tmp_MSE = get_MSE_mne_stft_regression_individual_G(evoked_list_test, fwd_list, G_ind_test, Xtest,
coef, labels = labels,
wsize = wsize, tstep = tstep)
cv_MSE[i,j] = tmp_MSE
cv_MSE = cv_MSE.mean(axis = 1)
best_ind = np.argmin(cv_MSE)
snr_tuning_star = snr_tuning_seq[best_ind]
return snr_tuning_star, cv_MSE
| gpl-3.0 | 9,214,749,064,001,478,000 | 42.450172 | 113 | 0.529779 | false |
google-research/google-research | stacked_capsule_autoencoders/capsules/math_ops.py | 1 | 4235 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Math ops.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sonnet as snt
import tensorflow.compat.v1 as tf
def relu1(x):
return tf.nn.relu6(x * 6.) / 6.
def safe_log(tensor, eps=1e-16):
is_zero = tf.less(tensor, eps)
tensor = tf.where(is_zero, tf.ones_like(tensor), tensor)
tensor = tf.where(is_zero, tf.zeros_like(tensor) - 1e8, tf.log(tensor))
return tensor
def safe_ce(labels, probs, axis=-1):
return tf.reduce_mean(-tf.reduce_sum(labels * safe_log(probs), axis=axis))
def flat_reduce(tensor, reduce_type='sum', final_reduce='mean'):
"""Flattens the tensor and reduces it."""
def _reduce(tensor, how, *args):
return getattr(tf, 'reduce_{}'.format(how))(tensor, *args) # pylint:disable=not-callable
tensor = snt.BatchFlatten()(tensor)
tensor = _reduce(tensor, reduce_type, -1)
if final_reduce is not None:
tensor = _reduce(tensor, final_reduce)
return tensor
def to_homogenous(tensor):
one = tf.ones_like(tensor[Ellipsis, :1])
return tf.concat([tensor, one], -1)
def from_homogenous(tensor):
tensor = tensor[Ellipsis, :-1] / (tensor[Ellipsis, -1:] + 1e-8)
return tensor
def apply_transform(transform, tensor=None, affine=True):
"""Applies a linear transform to a tensor.
Returns the translation components of the transform if tensor=None.
Args:
transform: [..., d+1, d+1] tensor.
tensor: [..., d] tensor or None.
affine: boolean; assumes affine transformation if True and does a smaller
matmul + offset instead of matmul.
Returns:
[..., d] tensor.
"""
if tensor is None:
# extract translation
tensor = transform[Ellipsis, :-1, -1]
elif affine:
tensor = tf.matmul(tensor, transform[Ellipsis, :-1, :-1], transpose_b=True)
tensor = (tensor + transform[Ellipsis, :-1, -1])
else:
tensor = to_homogenous(tensor)
tensor = tf.matmul(tensor, transform, transpose_b=True)
tensor = from_homogenous(tensor)
return tensor
def geometric_transform(pose_tensor, similarity=False, nonlinear=True,
as_matrix=False):
"""Convers paramer tensor into an affine or similarity transform.
Args:
pose_tensor: [..., 6] tensor.
similarity: bool.
nonlinear: bool; applies nonlinearities to pose params if True.
as_matrix: bool; convers the transform to a matrix if True.
Returns:
[..., 3, 3] tensor if `as_matrix` else [..., 6] tensor.
"""
scale_x, scale_y, theta, shear, trans_x, trans_y = tf.split(
pose_tensor, 6, -1)
if nonlinear:
scale_x, scale_y = (tf.nn.sigmoid(i) + 1e-2
for i in (scale_x, scale_y))
trans_x, trans_y, shear = (
tf.nn.tanh(i * 5.) for i in (trans_x, trans_y, shear))
theta *= 2. * math.pi
else:
scale_x, scale_y = (abs(i) + 1e-2 for i in (scale_x, scale_y))
c, s = tf.cos(theta), tf.sin(theta)
if similarity:
scale = scale_x
pose = [scale * c, -scale * s, trans_x, scale * s, scale * c, trans_y]
else:
pose = [
scale_x * c + shear * scale_y * s, -scale_x * s + shear * scale_y * c,
trans_x, scale_y * s, scale_y * c, trans_y
]
pose = tf.concat(pose, -1)
# convert to a matrix
if as_matrix:
shape = pose.shape[:-1].as_list()
shape += [2, 3]
pose = tf.reshape(pose, shape)
zeros = tf.zeros_like(pose[Ellipsis, :1, 0])
last = tf.stack([zeros, zeros, zeros + 1], -1)
pose = tf.concat([pose, last], -2)
return pose
def normalize(tensor, axis):
return tensor / (tf.reduce_sum(tensor, axis, keepdims=True) + 1e-8)
| apache-2.0 | -4,903,853,938,586,949,000 | 26.322581 | 93 | 0.647934 | false |
1fish2/the-blue-alliance | controllers/backup_controller.py | 1 | 12856 | import cloudstorage
import csv
import datetime
import json
import logging
import os
import StringIO
import tba_config
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from helpers.award_manipulator import AwardManipulator
from helpers.event_manipulator import EventManipulator
from helpers.match_manipulator import MatchManipulator
from models.award import Award
from models.event import Event
from models.match import Match
from models.team import Team
from datafeeds.csv_alliance_selections_parser import CSVAllianceSelectionsParser
from datafeeds.csv_awards_parser import CSVAwardsParser
from datafeeds.offseason_matches_parser import OffseasonMatchesParser
class TbaCSVBackupEventsEnqueue(webapp.RequestHandler):
"""
Enqueues CSV backup
"""
def get(self, year=None):
if year is None:
years = range(1992, datetime.datetime.now().year + 1)
for y in years:
taskqueue.add(
url='/tasks/enqueue/csv_backup_events/{}'.format(y),
method='GET')
self.response.out.write("Enqueued backup for years: {}".format(years))
else:
event_keys = Event.query(Event.year == int(year)).fetch(None, keys_only=True)
for event_key in event_keys:
taskqueue.add(
url='/tasks/do/csv_backup_event/{}'.format(event_key.id()),
method='GET')
template_values = {'event_keys': event_keys}
path = os.path.join(os.path.dirname(__file__), '../templates/backup/csv_backup_enqueue.html')
self.response.out.write(template.render(path, template_values))
class TbaCSVBackupEventDo(webapp.RequestHandler):
"""
Backs up event awards, matches, team list, rankings, and alliance selection order
"""
AWARDS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_awards.csv' # % (year, event_key, event_key)
MATCHES_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_matches.csv' # % (year, event_key, event_key)
TEAMS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_teams.csv' # % (year, event_key, event_key)
RANKINGS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_rankings.csv' # % (year, event_key, event_key)
ALLIANCES_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_alliances.csv' # % (year, event_key, event_key)
def get(self, event_key):
event = Event.get_by_id(event_key)
event.prepAwardsMatchesTeams()
if event.awards:
with cloudstorage.open(self.AWARDS_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as awards_file:
writer = csv.writer(awards_file, delimiter=',')
for award in event.awards:
for recipient in award.recipient_list:
team = recipient['team_number']
if type(team) == int:
team = 'frc{}'.format(team)
self._writerow_unicode(writer, [award.key.id(), award.name_str, team, recipient['awardee']])
if event.matches:
with cloudstorage.open(self.MATCHES_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as matches_file:
writer = csv.writer(matches_file, delimiter=',')
for match in event.matches:
red_score = match.alliances['red']['score']
blue_score = match.alliances['blue']['score']
self._writerow_unicode(writer, [match.key.id()] + match.alliances['red']['teams'] + match.alliances['blue']['teams'] + [red_score, blue_score])
if event.teams:
with cloudstorage.open(self.TEAMS_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as teams_file:
writer = csv.writer(teams_file, delimiter=',')
self._writerow_unicode(writer, [team.key.id() for team in event.teams])
if event.rankings:
with cloudstorage.open(self.RANKINGS_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as rankings_file:
writer = csv.writer(rankings_file, delimiter=',')
for row in event.rankings:
self._writerow_unicode(writer, row)
if event.alliance_selections:
with cloudstorage.open(self.ALLIANCES_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as alliances_file:
writer = csv.writer(alliances_file, delimiter=',')
for alliance in event.alliance_selections:
self._writerow_unicode(writer, alliance['picks'])
self.response.out.write("Done backing up {}!".format(event_key))
def _writerow_unicode(self, writer, row):
unicode_row = []
for s in row:
try:
unicode_row.append(s.encode("utf-8"))
except:
unicode_row.append(s)
writer.writerow(unicode_row)
class TbaCSVRestoreEventsEnqueue(webapp.RequestHandler):
"""
Enqueues CSV restore
"""
def get(self, year=None):
if tba_config.CONFIG["env"] == "prod": # disable in prod for now
logging.error("Tried to restore events from CSV for year {} in prod! No can do.".format(year))
return
if year is None:
years = range(1992, datetime.datetime.now().year + 1)
for y in years:
taskqueue.add(
url='/tasks/enqueue/csv_restore_events/{}'.format(y),
method='GET')
self.response.out.write("Enqueued restore for years: {}".format(years))
else:
event_keys = Event.query(Event.year == int(year)).fetch(None, keys_only=True)
for event_key in event_keys:
taskqueue.add(
url='/tasks/do/csv_restore_event/{}'.format(event_key.id()),
method='GET')
template_values = {'event_keys': event_keys}
path = os.path.join(os.path.dirname(__file__), '../templates/backup/csv_restore_enqueue.html')
self.response.out.write(template.render(path, template_values))
class TbaCSVRestoreEventDo(webapp.RequestHandler):
"""
Restores event awards, matches, team list, rankings, and alliance selection order
"""
BASE_URL = 'https://raw.githubusercontent.com/the-blue-alliance/tba-data-backup/master/events/{}/{}/' # % (year, event_key)
ALLIANCES_URL = BASE_URL + '{}_alliances.csv' # % (year, event_key, event_key)
AWARDS_URL = BASE_URL + '{}_awards.csv' # % (year, event_key, event_key)
MATCHES_URL = BASE_URL + '{}_matches.csv' # % (year, event_key, event_key)
RANKINGS_URL = BASE_URL + '{}_rankings.csv' # % (year, event_key, event_key)
# TEAMS_URL = BASE_URL + '{}_teams.csv' # % (year, event_key, event_key) # currently unused
def get(self, event_key):
if tba_config.CONFIG["env"] == "prod": # disable in prod for now
logging.error("Tried to restore {} from CSV in prod! No can do.".format(event_key))
return
event = Event.get_by_id(event_key)
# alliances
result = urlfetch.fetch(self.ALLIANCES_URL.format(event.year, event_key, event_key))
if result.status_code != 200:
logging.warning('Unable to retreive url: ' + (self.ALLIANCES_URL.format(event.year, event_key, event_key)))
else:
data = result.content.replace('frc', '')
alliance_selections = CSVAllianceSelectionsParser.parse(data)
if alliance_selections and event.alliance_selections != alliance_selections:
event.alliance_selections_json = json.dumps(alliance_selections)
event._alliance_selections = None
event.dirty = True
EventManipulator.createOrUpdate(event)
# awards
result = urlfetch.fetch(self.AWARDS_URL.format(event.year, event_key, event_key))
if result.status_code != 200:
logging.warning('Unable to retreive url: ' + (self.AWARDS_URL.format(event.year, event_key, event_key)))
else:
# convert into expected input format
data = StringIO.StringIO()
writer = csv.writer(data, delimiter=',')
for row in csv.reader(StringIO.StringIO(result.content), delimiter=','):
writer.writerow([event.year, event.event_short, row[1], row[2].replace('frc', ''), row[3]])
awards = []
for award in CSVAwardsParser.parse(data.getvalue()):
awards.append(Award(
id=Award.render_key_name(event.key_name, award['award_type_enum']),
name_str=award['name_str'],
award_type_enum=award['award_type_enum'],
year=event.year,
event=event.key,
event_type_enum=event.event_type_enum,
team_list=[ndb.Key(Team, 'frc{}'.format(team_number)) for team_number in award['team_number_list']],
recipient_json_list=award['recipient_json_list']
))
AwardManipulator.createOrUpdate(awards)
# matches
result = urlfetch.fetch(self.MATCHES_URL.format(event.year, event_key, event_key))
if result.status_code != 200:
logging.warning('Unable to retreive url: ' + (self.MATCHES_URL.format(event.year, event_key, event_key)))
else:
data = result.content.replace('frc', '').replace('{}_'.format(event_key), '')
match_dicts, _ = OffseasonMatchesParser.parse(data)
matches = [
Match(
id=Match.renderKeyName(
event.key.id(),
match.get("comp_level", None),
match.get("set_number", 0),
match.get("match_number", 0)),
event=event.key,
game=Match.FRC_GAMES_BY_YEAR.get(event.year, "frc_unknown"),
set_number=match.get("set_number", 0),
match_number=match.get("match_number", 0),
comp_level=match.get("comp_level", None),
team_key_names=match.get("team_key_names", None),
alliances_json=match.get("alliances_json", None)
)
for match in match_dicts]
MatchManipulator.createOrUpdate(matches)
# rankings
result = urlfetch.fetch(self.RANKINGS_URL.format(event.year, event_key, event_key))
if result.status_code != 200:
logging.warning('Unable to retreive url: ' + (self.RANKINGS_URL.format(event.year, event_key, event_key)))
else:
# convert into expected input format
rankings = list(csv.reader(StringIO.StringIO(result.content), delimiter=','))
if rankings and event.rankings != rankings:
event.rankings_json = json.dumps(rankings)
event._rankings = None
event.dirty = True
EventManipulator.createOrUpdate(event)
self.response.out.write("Done restoring {}!".format(event_key))
class TbaCSVBackupTeamsEnqueue(webapp.RequestHandler):
"""
Enqueues CSV teams backup
"""
def get(self):
taskqueue.add(
url='/tasks/do/csv_backup_teams',
method='GET')
self.response.out.write("Enqueued CSV teams backup")
class TbaCSVBackupTeamsDo(webapp.RequestHandler):
"""
Backs up teams
"""
TEAMS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/teams/teams.csv'
def get(self):
team_keys = Team.query().order(Team.team_number).fetch(None, keys_only=True)
team_futures = ndb.get_multi_async(team_keys)
if team_futures:
with cloudstorage.open(self.TEAMS_FILENAME_PATTERN, 'w') as teams_file:
writer = csv.writer(teams_file, delimiter=',')
for team_future in team_futures:
team = team_future.get_result()
self._writerow_unicode(writer, [team.key.id(), team.nickname, team.name, team.address, team.website, team.rookie_year])
self.response.out.write("Done backing up teams!")
def _writerow_unicode(self, writer, row):
unicode_row = []
for s in row:
try:
unicode_row.append(s.encode("utf-8"))
except:
unicode_row.append(s)
writer.writerow(unicode_row)
| mit | 3,481,643,148,437,116,000 | 44.588652 | 163 | 0.596842 | false |
neingeist/azulejo | azulejo/WindowTools.py | 1 | 3160 | '''
Created on Jul 12, 2012
@author: gillesB
'''
from Window import Window
from Workarea import Workarea
class WindowTools(object):
"""
Some utilities for the windows
"""
@staticmethod
def get_active_window():
"""
Returns the active window
:return: the active window
:rtype: Window
"""
XID = Workarea._root_window.get_full_property(Workarea.atom("_NET_ACTIVE_WINDOW"), 0).value[0]
return Window(XID)
@staticmethod
def print_window_info(keybinding, param):
"""
Prints some information of the currently active window.
:param keybinding:
:type keybinding:
:param param:
:type param:
"""
window = WindowTools.get_active_window()
assert isinstance(window, Window)
window_geometry = window.get_geometry()
print "Screen resolution: "
print "Workarea width and height: ", Workarea.get_workarea_width(), Workarea.get_workarea_height()
print "Window title: ", window.get_name()
print "Window width and height", window_geometry["width"], window_geometry["height"] , "+ frame size: ", window.get_frame_extents()
print "Window position", window_geometry["x"], window_geometry["y"]
@staticmethod
def get_normal_windows_on_current_desktop():
"""
Returns all 'normal' windows which are visible on the current desktop.
:return: all 'normal' windows which are visible on the current desktop
:rtype: list[Window]
"""
def m_get_window_from_XID(XID):
return Window(XID)
def f_normal_window(window):
if WindowTools.window_is_on_current_desktop(window) and WindowTools.window_is_window_type_normal(window):
return True
return False
XIDs = Workarea.get_all_XIDs()
windows = map(m_get_window_from_XID, XIDs)
filtered_windows = filter(f_normal_window, windows)
filtered_windows.reverse()
return filtered_windows
@staticmethod
def window_is_on_current_desktop(window):
"""
Returns True if window is on current desktop, False otherwise
:param window:
:type window: Window
:return: True if window is on current desktop, False otherwise
:rtype: bool
"""
if Workarea.get_current_desktop() == window.get_desktop_id():
return True
return False
@staticmethod
def window_is_window_type_normal(window):
"""
Returns True if window is a normal window, False otherwise
:param window:
:type window: Window
:return: True if window is a normal window, False otherwise
:rtype: bool
"""
window_type = window.get_window_type()
if (window_type == Workarea.atom("_NET_WM_WINDOW_TYPE_NORMAL")
or (window_type is None and window.get_transient_for() is None)):
return True
return False
def __init__(self, params):
'''
Constructor
'''
| mit | -2,038,185,904,146,247,700 | 29.990196 | 139 | 0.592089 | false |
HomeRad/TorCleaner | wc/dns/rdtypes/IN/PX.py | 1 | 3823 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import wc.dns.exception
import wc.dns.rdata
import wc.dns.name
class PX(wc.dns.rdata.Rdata):
"""PX record.
@ivar preference: the preference value
@type preference: int
@ivar map822: the map822 name
@type map822: wc.dns.name.Name object
@ivar mapx400: the mapx400 name
@type mapx400: wc.dns.name.Name object
@see: RFC 2163"""
__slots__ = ['preference', 'map822', 'mapx400']
def __init__(self, rdclass, rdtype, preference, map822, mapx400):
super(PX, self).__init__(rdclass, rdtype)
self.preference = preference
self.map822 = map822
self.mapx400 = mapx400
def to_text(self, origin=None, relativize=True, **kw):
map822 = self.map822.choose_relativity(origin, relativize)
mapx400 = self.mapx400.choose_relativity(origin, relativize)
return '%d %s %s' % (self.preference, map822, mapx400)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
preference = tok.get_uint16()
map822 = tok.get_name()
map822 = map822.choose_relativity(origin, relativize)
mapx400 = tok.get_name(None)
mapx400 = mapx400.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, preference, map822, mapx400)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
pref = struct.pack("!H", self.preference)
file.write(pref)
self.map822.to_wire(file, None, origin)
self.mapx400.to_wire(file, None, origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(preference, ) = struct.unpack('!H', wire[current : current + 2])
current += 2
rdlen -= 2
(map822, cused) = wc.dns.name.from_wire(wire[: current + rdlen],
current)
if cused > rdlen:
raise wc.dns.exception.FormError
current += cused
rdlen -= cused
if not origin is None:
map822 = map822.relativize(origin)
(mapx400, cused) = wc.dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise wc.dns.exception.FormError
if not origin is None:
mapx400 = mapx400.relativize(origin)
return cls(rdclass, rdtype, preference, map822, mapx400)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.map822 = self.map822.choose_relativity(origin, relativize)
self.mapx400 = self.mapx400.choose_relativity(origin, relativize)
def _cmp(self, other):
sp = struct.pack("!H", self.preference)
op = struct.pack("!H", other.preference)
v = cmp(sp, op)
if v == 0:
v = cmp(self.map822, other.map822)
if v == 0:
v = cmp(self.mapx400, other.mapx400)
return v
| gpl-2.0 | -6,621,502,497,215,966,000 | 38.010204 | 79 | 0.63458 | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_05_01/models/_models_py3.py | 1 | 64701 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._compute_management_client_enums import *
class AccessUri(msrest.serialization.Model):
"""A disk access SAS uri.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar access_sas: A SAS uri for accessing a disk.
:vartype access_sas: str
"""
_validation = {
'access_sas': {'readonly': True},
}
_attribute_map = {
'access_sas': {'key': 'accessSAS', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessUri, self).__init__(**kwargs)
self.access_sas = None
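# Illustrative sketch (not generated code): the SAS uri in access_sas can be
# read with any HTTP client once a grant-access operation completes. The
# requests import and the `result` variable are assumptions for illustration.
#
#     import requests
#     # result: an AccessUri returned by a disks grant-access poller
#     data = requests.get(result.access_sas).content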
class ApiError(msrest.serialization.Model):
"""Api error.
:param details: The Api error details.
:type details: list[~azure.mgmt.compute.v2020_05_01.models.ApiErrorBase]
:param innererror: The Api inner error.
:type innererror: ~azure.mgmt.compute.v2020_05_01.models.InnerError
:param code: The error code.
:type code: str
:param target: The target of the particular error.
:type target: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'details': {'key': 'details', 'type': '[ApiErrorBase]'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
'code': {'key': 'code', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
details: Optional[List["ApiErrorBase"]] = None,
innererror: Optional["InnerError"] = None,
code: Optional[str] = None,
target: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ApiError, self).__init__(**kwargs)
self.details = details
self.innererror = innererror
self.code = code
self.target = target
self.message = message
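# Illustrative sketch (not generated code): flattening an ApiError, including
# its nested ApiErrorBase details, into a single log-friendly string.
#
#     def format_api_error(err: ApiError) -> str:
#         lines = ["{}: {}".format(err.code, err.message)]
#         for detail in err.details or []:
#             lines.append("  {}: {}".format(detail.code, detail.message))
#         return "\n".join(lines)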
class ApiErrorBase(msrest.serialization.Model):
"""Api error base.
:param code: The error code.
:type code: str
:param target: The target of the particular error.
:type target: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
target: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ApiErrorBase, self).__init__(**kwargs)
self.code = code
self.target = target
self.message = message
class CreationData(msrest.serialization.Model):
"""Data used when creating a disk.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param create_option: Required. This enumerates the possible sources of a disk's creation.
Possible values include: "Empty", "Attach", "FromImage", "Import", "Copy", "Restore", "Upload".
:type create_option: str or ~azure.mgmt.compute.v2020_05_01.models.DiskCreateOption
:param storage_account_id: Required if createOption is Import. The Azure Resource Manager
identifier of the storage account containing the blob to import as a disk.
:type storage_account_id: str
:param image_reference: Disk source information.
:type image_reference: ~azure.mgmt.compute.v2020_05_01.models.ImageDiskReference
:param gallery_image_reference: Required if creating from a Gallery Image. The id of the
     ImageDiskReference will be the ARM id of the shared gallery image version from which to create a
disk.
:type gallery_image_reference: ~azure.mgmt.compute.v2020_05_01.models.ImageDiskReference
:param source_uri: If createOption is Import, this is the URI of a blob to be imported into a
managed disk.
:type source_uri: str
:param source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot
or disk.
:type source_resource_id: str
:ivar source_unique_id: If this field is set, this is the unique id identifying the source of
this resource.
:vartype source_unique_id: str
:param upload_size_bytes: If createOption is Upload, this is the size of the contents of the
upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for
the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
:type upload_size_bytes: long
"""
_validation = {
'create_option': {'required': True},
'source_unique_id': {'readonly': True},
}
_attribute_map = {
'create_option': {'key': 'createOption', 'type': 'str'},
'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
'image_reference': {'key': 'imageReference', 'type': 'ImageDiskReference'},
'gallery_image_reference': {'key': 'galleryImageReference', 'type': 'ImageDiskReference'},
'source_uri': {'key': 'sourceUri', 'type': 'str'},
'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
'source_unique_id': {'key': 'sourceUniqueId', 'type': 'str'},
'upload_size_bytes': {'key': 'uploadSizeBytes', 'type': 'long'},
}
def __init__(
self,
*,
create_option: Union[str, "DiskCreateOption"],
storage_account_id: Optional[str] = None,
image_reference: Optional["ImageDiskReference"] = None,
gallery_image_reference: Optional["ImageDiskReference"] = None,
source_uri: Optional[str] = None,
source_resource_id: Optional[str] = None,
upload_size_bytes: Optional[int] = None,
**kwargs
):
super(CreationData, self).__init__(**kwargs)
self.create_option = create_option
self.storage_account_id = storage_account_id
self.image_reference = image_reference
self.gallery_image_reference = gallery_image_reference
self.source_uri = source_uri
self.source_resource_id = source_resource_id
self.source_unique_id = None
self.upload_size_bytes = upload_size_bytes
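# Illustrative sketch (not generated code): the two most common create
# options. The source resource id below is a placeholder, not a real disk.
#
#     empty_data = CreationData(create_option="Empty")
#     copy_data = CreationData(
#         create_option="Copy",
#         source_resource_id="/subscriptions/<sub>/resourceGroups/<rg>"
#                            "/providers/Microsoft.Compute/disks/<source-disk>",
#     )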
class Resource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class Disk(Resource):
"""Disk resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar managed_by: A relative URI containing the ID of the VM that has the disk attached.
:vartype managed_by: str
:ivar managed_by_extended: List of relative URIs containing the IDs of the VMs that have the
disk attached. maxShares should be set to a value greater than one for disks to allow attaching
them to multiple VMs.
:vartype managed_by_extended: list[str]
    :param sku: The disk's sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or
UltraSSD_LRS.
:type sku: ~azure.mgmt.compute.v2020_05_01.models.DiskSku
:param zones: The Logical zone list for Disk.
:type zones: list[str]
:ivar time_created: The time when the disk was created.
:vartype time_created: ~datetime.datetime
:param os_type: The Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2020_05_01.models.OperatingSystemTypes
:param hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS
disks only. Possible values include: "V1", "V2".
:type hyper_v_generation: str or ~azure.mgmt.compute.v2020_05_01.models.HyperVGeneration
:param creation_data: Disk source information. CreationData information cannot be changed after
the disk has been created.
:type creation_data: ~azure.mgmt.compute.v2020_05_01.models.CreationData
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
:ivar disk_size_bytes: The size of the disk in bytes. This field is read only.
:vartype disk_size_bytes: long
:ivar unique_id: Unique Guid identifying the resource.
:vartype unique_id: str
:param encryption_settings_collection: Encryption settings collection used for Azure Disk
Encryption, can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsCollection
:ivar provisioning_state: The disk provisioning state.
:vartype provisioning_state: str
:param disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
UltraSSD disks. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_write: long
:param disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of
10.
:type disk_m_bps_read_write: long
:param disk_iops_read_only: The total number of IOPS that will be allowed across all VMs
mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_only: long
:param disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs
mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses
the ISO notation, of powers of 10.
:type disk_m_bps_read_only: long
:ivar disk_state: The state of the disk. Possible values include: "Unattached", "Attached",
"Reserved", "ActiveSAS", "ReadyToUpload", "ActiveUpload".
:vartype disk_state: str or ~azure.mgmt.compute.v2020_05_01.models.DiskState
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2020_05_01.models.Encryption
:param max_shares: The maximum number of VMs that can attach to the disk at the same time.
Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
:type max_shares: int
:ivar share_info: Details of the list of all VMs that have the disk attached. maxShares should
be set to a value greater than one for disks to allow attaching them to multiple VMs.
:vartype share_info: list[~azure.mgmt.compute.v2020_05_01.models.ShareInfoElement]
:param network_access_policy: Policy for accessing the disk via network. Possible values
include: "AllowAll", "AllowPrivate", "DenyAll".
:type network_access_policy: str or ~azure.mgmt.compute.v2020_05_01.models.NetworkAccessPolicy
:param disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
:type disk_access_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'managed_by': {'readonly': True},
'managed_by_extended': {'readonly': True},
'time_created': {'readonly': True},
'disk_size_bytes': {'readonly': True},
'unique_id': {'readonly': True},
'provisioning_state': {'readonly': True},
'disk_state': {'readonly': True},
'share_info': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'managed_by_extended': {'key': 'managedByExtended', 'type': '[str]'},
'sku': {'key': 'sku', 'type': 'DiskSku'},
'zones': {'key': 'zones', 'type': '[str]'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'},
'disk_m_bps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'},
'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'},
'disk_m_bps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'},
'disk_state': {'key': 'properties.diskState', 'type': 'str'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'max_shares': {'key': 'properties.maxShares', 'type': 'int'},
'share_info': {'key': 'properties.shareInfo', 'type': '[ShareInfoElement]'},
'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'},
'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
sku: Optional["DiskSku"] = None,
zones: Optional[List[str]] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
creation_data: Optional["CreationData"] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
disk_iops_read_write: Optional[int] = None,
disk_m_bps_read_write: Optional[int] = None,
disk_iops_read_only: Optional[int] = None,
disk_m_bps_read_only: Optional[int] = None,
encryption: Optional["Encryption"] = None,
max_shares: Optional[int] = None,
network_access_policy: Optional[Union[str, "NetworkAccessPolicy"]] = None,
disk_access_id: Optional[str] = None,
**kwargs
):
super(Disk, self).__init__(location=location, tags=tags, **kwargs)
self.managed_by = None
self.managed_by_extended = None
self.sku = sku
self.zones = zones
self.time_created = None
self.os_type = os_type
self.hyper_v_generation = hyper_v_generation
self.creation_data = creation_data
self.disk_size_gb = disk_size_gb
self.disk_size_bytes = None
self.unique_id = None
self.encryption_settings_collection = encryption_settings_collection
self.provisioning_state = None
self.disk_iops_read_write = disk_iops_read_write
self.disk_m_bps_read_write = disk_m_bps_read_write
self.disk_iops_read_only = disk_iops_read_only
self.disk_m_bps_read_only = disk_m_bps_read_only
self.disk_state = None
self.encryption = encryption
self.max_shares = max_shares
self.share_info = None
self.network_access_policy = network_access_policy
self.disk_access_id = disk_access_id
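# Illustrative sketch (not generated code): a minimal Disk payload for an
# empty 128 GiB premium disk. The region is a placeholder; readonly fields
# such as time_created, disk_state and share_info are filled by the service.
#
#     disk = Disk(
#         location="eastus",
#         sku=DiskSku(name="Premium_LRS"),
#         creation_data=CreationData(create_option="Empty"),
#         disk_size_gb=128,
#     )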
class DiskAccess(Resource):
"""disk access resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar private_endpoint_connections: A readonly collection of private endpoint connections
created on the disk. Currently only one endpoint connection is supported.
:vartype private_endpoint_connections:
list[~azure.mgmt.compute.v2020_05_01.models.PrivateEndpointConnection]
:ivar provisioning_state: The disk access resource provisioning state.
:vartype provisioning_state: str
:ivar time_created: The time when the disk access was created.
:vartype time_created: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'private_endpoint_connections': {'readonly': True},
'provisioning_state': {'readonly': True},
'time_created': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(DiskAccess, self).__init__(location=location, tags=tags, **kwargs)
self.private_endpoint_connections = None
self.provisioning_state = None
self.time_created = None
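# Illustrative sketch (not generated code): models inherit serialize() and
# as_dict() from msrest.serialization.Model, so a DiskAccess instance can be
# converted to its wire-format dict; readonly properties are omitted.
#
#     access = DiskAccess(location="eastus", tags={"env": "test"})
#     payload = access.serialize()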
class DiskAccessList(msrest.serialization.Model):
"""The List disk access operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of disk access resources.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.DiskAccess]
:param next_link: The uri to fetch the next page of disk access resources. Call ListNext() with
this to fetch the next page of disk access resources.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DiskAccess]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DiskAccess"],
next_link: Optional[str] = None,
**kwargs
):
super(DiskAccessList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DiskAccessUpdate(msrest.serialization.Model):
"""Used for updating a disk access resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(DiskAccessUpdate, self).__init__(**kwargs)
self.tags = tags
class DiskEncryptionSet(Resource):
"""disk encryption set resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: The managed identity for the disk encryption set. It should be given
permission on the key vault before it can be used to encrypt disks.
:type identity: ~azure.mgmt.compute.v2020_05_01.models.EncryptionSetIdentity
:param encryption_type: The type of key used to encrypt the data of the disk. Possible values
include: "EncryptionAtRestWithPlatformKey", "EncryptionAtRestWithCustomerKey",
"EncryptionAtRestWithPlatformAndCustomerKeys".
:type encryption_type: str or ~azure.mgmt.compute.v2020_05_01.models.EncryptionType
:param active_key: The key vault key which is currently used by this disk encryption set.
:type active_key: ~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndKeyReference
:ivar previous_keys: A readonly collection of key vault keys previously used by this disk
encryption set while a key rotation is in progress. It will be empty if there is no ongoing key
rotation.
:vartype previous_keys: list[~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndKeyReference]
:ivar provisioning_state: The disk encryption set provisioning state.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'previous_keys': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'EncryptionSetIdentity'},
'encryption_type': {'key': 'properties.encryptionType', 'type': 'str'},
'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'},
'previous_keys': {'key': 'properties.previousKeys', 'type': '[KeyVaultAndKeyReference]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
identity: Optional["EncryptionSetIdentity"] = None,
encryption_type: Optional[Union[str, "EncryptionType"]] = None,
active_key: Optional["KeyVaultAndKeyReference"] = None,
**kwargs
):
super(DiskEncryptionSet, self).__init__(location=location, tags=tags, **kwargs)
self.identity = identity
self.encryption_type = encryption_type
self.active_key = active_key
self.previous_keys = None
self.provisioning_state = None
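# Illustrative sketch (not generated code): a customer-managed-key encryption
# set. EncryptionSetIdentity, KeyVaultAndKeyReference and SourceVault are
# defined elsewhere in this module; the vault id and key url are placeholders.
#
#     des = DiskEncryptionSet(
#         location="eastus",
#         identity=EncryptionSetIdentity(type="SystemAssigned"),
#         encryption_type="EncryptionAtRestWithCustomerKey",
#         active_key=KeyVaultAndKeyReference(
#             source_vault=SourceVault(id="<vault-arm-id>"),
#             key_url="https://<vault>.vault.azure.net/keys/<key>/<version>",
#         ),
#     )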
class DiskEncryptionSetList(msrest.serialization.Model):
"""The List disk encryption set operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of disk encryption sets.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSet]
:param next_link: The uri to fetch the next page of disk encryption sets. Call ListNext() with
this to fetch the next page of disk encryption sets.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DiskEncryptionSet]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DiskEncryptionSet"],
next_link: Optional[str] = None,
**kwargs
):
super(DiskEncryptionSetList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DiskEncryptionSetUpdate(msrest.serialization.Model):
"""disk encryption set update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param encryption_type: The type of key used to encrypt the data of the disk. Possible values
include: "EncryptionAtRestWithPlatformKey", "EncryptionAtRestWithCustomerKey",
"EncryptionAtRestWithPlatformAndCustomerKeys".
:type encryption_type: str or ~azure.mgmt.compute.v2020_05_01.models.EncryptionType
    :param active_key: Key Vault Key Url and vault id of KeK. KeK is optional and, when provided,
     is used to unwrap the encryptionKey.
:type active_key: ~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndKeyReference
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'encryption_type': {'key': 'properties.encryptionType', 'type': 'str'},
'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
encryption_type: Optional[Union[str, "EncryptionType"]] = None,
active_key: Optional["KeyVaultAndKeyReference"] = None,
**kwargs
):
super(DiskEncryptionSetUpdate, self).__init__(**kwargs)
self.tags = tags
self.encryption_type = encryption_type
self.active_key = active_key
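# Illustrative sketch (not generated code): rotating to a new key version by
# updating active_key; while re-encryption runs, the service reports the old
# key in previous_keys on the DiskEncryptionSet. The key url is a placeholder.
#
#     rotation = DiskEncryptionSetUpdate(
#         active_key=KeyVaultAndKeyReference(
#             source_vault=SourceVault(id="<vault-arm-id>"),
#             key_url="https://<vault>.vault.azure.net/keys/<key>/<new-version>",
#         ),
#     )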
class DiskList(msrest.serialization.Model):
"""The List Disks operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of disks.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.Disk]
:param next_link: The uri to fetch the next page of disks. Call ListNext() with this to fetch
the next page of disks.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Disk]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Disk"],
next_link: Optional[str] = None,
**kwargs
):
super(DiskList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DiskSku(msrest.serialization.Model):
"""The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.
Variables are only populated by the server, and will be ignored when sending a request.
:param name: The sku name. Possible values include: "Standard_LRS", "Premium_LRS",
"StandardSSD_LRS", "UltraSSD_LRS".
:type name: str or ~azure.mgmt.compute.v2020_05_01.models.DiskStorageAccountTypes
:ivar tier: The sku tier.
:vartype tier: str
"""
_validation = {
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[Union[str, "DiskStorageAccountTypes"]] = None,
**kwargs
):
super(DiskSku, self).__init__(**kwargs)
self.name = name
self.tier = None
class DiskUpdate(msrest.serialization.Model):
"""Disk update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
    :param sku: The disk's sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or
UltraSSD_LRS.
:type sku: ~azure.mgmt.compute.v2020_05_01.models.DiskSku
:param os_type: the Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2020_05_01.models.OperatingSystemTypes
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
    :param encryption_settings_collection: Encryption settings collection used by Azure Disk
    Encryption; it can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsCollection
:param disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
UltraSSD disks. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_write: long
:param disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of
10.
:type disk_m_bps_read_write: long
:param disk_iops_read_only: The total number of IOPS that will be allowed across all VMs
mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_only: long
:param disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs
mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses
the ISO notation, of powers of 10.
:type disk_m_bps_read_only: long
:param max_shares: The maximum number of VMs that can attach to the disk at the same time.
Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
:type max_shares: int
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2020_05_01.models.Encryption
:param network_access_policy: Policy for accessing the disk via network. Possible values
include: "AllowAll", "AllowPrivate", "DenyAll".
:type network_access_policy: str or ~azure.mgmt.compute.v2020_05_01.models.NetworkAccessPolicy
:param disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
:type disk_access_id: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'DiskSku'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'},
'disk_m_bps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'},
'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'},
'disk_m_bps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'},
'max_shares': {'key': 'properties.maxShares', 'type': 'int'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'},
'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
sku: Optional["DiskSku"] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
disk_iops_read_write: Optional[int] = None,
disk_m_bps_read_write: Optional[int] = None,
disk_iops_read_only: Optional[int] = None,
disk_m_bps_read_only: Optional[int] = None,
max_shares: Optional[int] = None,
encryption: Optional["Encryption"] = None,
network_access_policy: Optional[Union[str, "NetworkAccessPolicy"]] = None,
disk_access_id: Optional[str] = None,
**kwargs
):
super(DiskUpdate, self).__init__(**kwargs)
self.tags = tags
self.sku = sku
self.os_type = os_type
self.disk_size_gb = disk_size_gb
self.encryption_settings_collection = encryption_settings_collection
self.disk_iops_read_write = disk_iops_read_write
self.disk_m_bps_read_write = disk_m_bps_read_write
self.disk_iops_read_only = disk_iops_read_only
self.disk_m_bps_read_only = disk_m_bps_read_only
self.max_shares = max_shares
self.encryption = encryption
self.network_access_policy = network_access_policy
self.disk_access_id = disk_access_id
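# Illustrative usage sketch (not part of the generated code): a patch that grows
# the disk and moves it to a customer-managed key. Encryption is defined just
# below; the disk encryption set id is a hypothetical placeholder.
#
#   update = DiskUpdate(
#       disk_size_gb=256,
#       encryption=Encryption(
#           disk_encryption_set_id="/subscriptions/<sub>/.../diskEncryptionSets/<des>",
#           type="EncryptionAtRestWithCustomerKey",
#       ),
#   )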
class Encryption(msrest.serialization.Model):
"""Encryption at rest settings for disk or snapshot.
:param disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
encryption at rest.
:type disk_encryption_set_id: str
:param type: The type of key used to encrypt the data of the disk. Possible values include:
"EncryptionAtRestWithPlatformKey", "EncryptionAtRestWithCustomerKey",
"EncryptionAtRestWithPlatformAndCustomerKeys".
:type type: str or ~azure.mgmt.compute.v2020_05_01.models.EncryptionType
"""
_attribute_map = {
'disk_encryption_set_id': {'key': 'diskEncryptionSetId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
disk_encryption_set_id: Optional[str] = None,
type: Optional[Union[str, "EncryptionType"]] = None,
**kwargs
):
super(Encryption, self).__init__(**kwargs)
self.disk_encryption_set_id = disk_encryption_set_id
self.type = type
class EncryptionSetIdentity(msrest.serialization.Model):
"""The managed identity for the disk encryption set. It should be given permission on the key vault before it can be used to encrypt disks.
Variables are only populated by the server, and will be ignored when sending a request.
:param type: The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is
supported. Possible values include: "SystemAssigned".
:type type: str or ~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSetIdentityType
:ivar principal_id: The object id of the Managed Identity Resource. This will be sent to the RP
from ARM via the x-ms-identity-principal-id header in the PUT request if the resource has a
systemAssigned(implicit) identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the Managed Identity Resource. This will be sent to the RP
from ARM via the x-ms-client-tenant-id header in the PUT request if the resource has a
systemAssigned(implicit) identity.
:vartype tenant_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "DiskEncryptionSetIdentityType"]] = None,
**kwargs
):
super(EncryptionSetIdentity, self).__init__(**kwargs)
self.type = type
self.principal_id = None
self.tenant_id = None
class EncryptionSettingsCollection(msrest.serialization.Model):
"""Encryption settings for disk or snapshot.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Set this flag to true and provide DiskEncryptionKey and optional
KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and
KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object,
the existing settings remain unchanged.
:type enabled: bool
:param encryption_settings: A collection of encryption settings, one for each disk volume.
:type encryption_settings:
list[~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsElement]
:param encryption_settings_version: Describes what type of encryption is used for the disks.
Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption
    with AAD app. '1.1' corresponds to Azure Disk Encryption.
:type encryption_settings_version: str
"""
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'encryption_settings': {'key': 'encryptionSettings', 'type': '[EncryptionSettingsElement]'},
'encryption_settings_version': {'key': 'encryptionSettingsVersion', 'type': 'str'},
}
def __init__(
self,
*,
enabled: bool,
encryption_settings: Optional[List["EncryptionSettingsElement"]] = None,
encryption_settings_version: Optional[str] = None,
**kwargs
):
super(EncryptionSettingsCollection, self).__init__(**kwargs)
self.enabled = enabled
self.encryption_settings = encryption_settings
self.encryption_settings_version = encryption_settings_version
class EncryptionSettingsElement(msrest.serialization.Model):
"""Encryption settings for one disk volume.
:param disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key.
:type disk_encryption_key: ~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndSecretReference
:param key_encryption_key: Key Vault Key Url and vault id of the key encryption key.
KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
:type key_encryption_key: ~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndKeyReference
"""
_attribute_map = {
'disk_encryption_key': {'key': 'diskEncryptionKey', 'type': 'KeyVaultAndSecretReference'},
'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultAndKeyReference'},
}
def __init__(
self,
*,
disk_encryption_key: Optional["KeyVaultAndSecretReference"] = None,
key_encryption_key: Optional["KeyVaultAndKeyReference"] = None,
**kwargs
):
super(EncryptionSettingsElement, self).__init__(**kwargs)
self.disk_encryption_key = disk_encryption_key
self.key_encryption_key = key_encryption_key
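# Illustrative usage sketch (not part of the generated code): an Azure Disk
# Encryption settings collection for a single volume, composed from the two
# models above and KeyVaultAndSecretReference/SourceVault defined further below;
# ids and urls are hypothetical placeholders.
#
#   settings = EncryptionSettingsCollection(
#       enabled=True,
#       encryption_settings=[EncryptionSettingsElement(
#           disk_encryption_key=KeyVaultAndSecretReference(
#               source_vault=SourceVault(id="/subscriptions/<sub>/.../vaults/<vault>"),
#               secret_url="https://<vault>.vault.azure.net/secrets/<secret>/<version>",
#           ),
#       )],
#       encryption_settings_version="1.1",
#   )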
class GrantAccessData(msrest.serialization.Model):
"""Data used for requesting a SAS.
All required parameters must be populated in order to send to Azure.
:param access: Required. Possible values include: "None", "Read", "Write".
:type access: str or ~azure.mgmt.compute.v2020_05_01.models.AccessLevel
:param duration_in_seconds: Required. Time duration in seconds until the SAS access expires.
:type duration_in_seconds: int
"""
_validation = {
'access': {'required': True},
'duration_in_seconds': {'required': True},
}
_attribute_map = {
'access': {'key': 'access', 'type': 'str'},
'duration_in_seconds': {'key': 'durationInSeconds', 'type': 'int'},
}
def __init__(
self,
*,
access: Union[str, "AccessLevel"],
duration_in_seconds: int,
**kwargs
):
super(GrantAccessData, self).__init__(**kwargs)
self.access = access
self.duration_in_seconds = duration_in_seconds
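# Illustrative usage sketch (not part of the generated code): request read-only
# SAS access to a disk or snapshot for one hour.
#
#   grant = GrantAccessData(access="Read", duration_in_seconds=3600)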
class ImageDiskReference(msrest.serialization.Model):
"""The source image used for creating the disk.
All required parameters must be populated in order to send to Azure.
:param id: Required. A relative uri containing either a Platform Image Repository or user image
reference.
:type id: str
:param lun: If the disk is created from an image's data disk, this is an index that indicates
which of the data disks in the image to use. For OS disks, this field is null.
:type lun: int
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'lun': {'key': 'lun', 'type': 'int'},
}
def __init__(
self,
*,
id: str,
lun: Optional[int] = None,
**kwargs
):
super(ImageDiskReference, self).__init__(**kwargs)
self.id = id
self.lun = lun
class InnerError(msrest.serialization.Model):
"""Inner error details.
:param exceptiontype: The exception type.
:type exceptiontype: str
:param errordetail: The internal error message or exception dump.
:type errordetail: str
"""
_attribute_map = {
'exceptiontype': {'key': 'exceptiontype', 'type': 'str'},
'errordetail': {'key': 'errordetail', 'type': 'str'},
}
def __init__(
self,
*,
exceptiontype: Optional[str] = None,
errordetail: Optional[str] = None,
**kwargs
):
super(InnerError, self).__init__(**kwargs)
self.exceptiontype = exceptiontype
self.errordetail = errordetail
class KeyVaultAndKeyReference(msrest.serialization.Model):
"""Key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to unwrap the encryptionKey.
All required parameters must be populated in order to send to Azure.
:param source_vault: Required. Resource id of the KeyVault containing the key or secret.
:type source_vault: ~azure.mgmt.compute.v2020_05_01.models.SourceVault
:param key_url: Required. Url pointing to a key or secret in KeyVault.
:type key_url: str
"""
_validation = {
'source_vault': {'required': True},
'key_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
'key_url': {'key': 'keyUrl', 'type': 'str'},
}
def __init__(
self,
*,
source_vault: "SourceVault",
key_url: str,
**kwargs
):
super(KeyVaultAndKeyReference, self).__init__(**kwargs)
self.source_vault = source_vault
self.key_url = key_url
class KeyVaultAndSecretReference(msrest.serialization.Model):
"""Key Vault Secret Url and vault id of the encryption key.
All required parameters must be populated in order to send to Azure.
:param source_vault: Required. Resource id of the KeyVault containing the key or secret.
:type source_vault: ~azure.mgmt.compute.v2020_05_01.models.SourceVault
:param secret_url: Required. Url pointing to a key or secret in KeyVault.
:type secret_url: str
"""
_validation = {
'source_vault': {'required': True},
'secret_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
'secret_url': {'key': 'secretUrl', 'type': 'str'},
}
def __init__(
self,
*,
source_vault: "SourceVault",
secret_url: str,
**kwargs
):
super(KeyVaultAndSecretReference, self).__init__(**kwargs)
self.source_vault = source_vault
self.secret_url = secret_url
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(msrest.serialization.Model):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: private endpoint connection Id.
:vartype id: str
:ivar name: private endpoint connection name.
:vartype name: str
:ivar type: private endpoint connection type.
:vartype type: str
:param private_endpoint: The resource of private end point.
:type private_endpoint: ~azure.mgmt.compute.v2020_05_01.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between DiskAccess and Virtual Network.
:type private_link_service_connection_state:
~azure.mgmt.compute.v2020_05_01.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.compute.v2020_05_01.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
private_endpoint: Optional["PrivateEndpoint"] = None,
private_link_service_connection_state: Optional["PrivateLinkServiceConnectionState"] = None,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
self.provisioning_state = None
class PrivateLinkResource(msrest.serialization.Model):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: private link resource Id.
:vartype id: str
:ivar name: private link resource name.
:vartype name: str
:ivar type: private link resource type.
:vartype type: str
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.group_id = None
self.required_members = None
self.required_zone_names = required_zone_names
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
*,
value: Optional[List["PrivateLinkResource"]] = None,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = value
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected".
:type status: str or
~azure.mgmt.compute.v2020_05_01.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[Union[str, "PrivateEndpointServiceConnectionStatus"]] = None,
description: Optional[str] = None,
actions_required: Optional[str] = None,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
class ShareInfoElement(msrest.serialization.Model):
"""ShareInfoElement.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar vm_uri: A relative URI containing the ID of the VM that has the disk attached.
:vartype vm_uri: str
"""
_validation = {
'vm_uri': {'readonly': True},
}
_attribute_map = {
'vm_uri': {'key': 'vmUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ShareInfoElement, self).__init__(**kwargs)
self.vm_uri = None
class Snapshot(Resource):
"""Snapshot resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar managed_by: Unused. Always Null.
:vartype managed_by: str
    :param sku: The snapshot's sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
:type sku: ~azure.mgmt.compute.v2020_05_01.models.SnapshotSku
:ivar time_created: The time when the snapshot was created.
:vartype time_created: ~datetime.datetime
:param os_type: The Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2020_05_01.models.OperatingSystemTypes
:param hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS
disks only. Possible values include: "V1", "V2".
:type hyper_v_generation: str or ~azure.mgmt.compute.v2020_05_01.models.HyperVGeneration
:param creation_data: Disk source information. CreationData information cannot be changed after
the disk has been created.
:type creation_data: ~azure.mgmt.compute.v2020_05_01.models.CreationData
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
:ivar disk_size_bytes: The size of the disk in bytes. This field is read only.
:vartype disk_size_bytes: long
:ivar unique_id: Unique Guid identifying the resource.
:vartype unique_id: str
    :param encryption_settings_collection: Encryption settings collection used by Azure Disk
    Encryption; it can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsCollection
:ivar provisioning_state: The disk provisioning state.
:vartype provisioning_state: str
:param incremental: Whether a snapshot is incremental. Incremental snapshots on the same disk
occupy less space than full snapshots and can be diffed.
:type incremental: bool
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2020_05_01.models.Encryption
:param network_access_policy: Policy for accessing the disk via network. Possible values
include: "AllowAll", "AllowPrivate", "DenyAll".
:type network_access_policy: str or ~azure.mgmt.compute.v2020_05_01.models.NetworkAccessPolicy
:param disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
:type disk_access_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'managed_by': {'readonly': True},
'time_created': {'readonly': True},
'disk_size_bytes': {'readonly': True},
'unique_id': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'SnapshotSku'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'incremental': {'key': 'properties.incremental', 'type': 'bool'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'},
'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
sku: Optional["SnapshotSku"] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
creation_data: Optional["CreationData"] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
incremental: Optional[bool] = None,
encryption: Optional["Encryption"] = None,
network_access_policy: Optional[Union[str, "NetworkAccessPolicy"]] = None,
disk_access_id: Optional[str] = None,
**kwargs
):
super(Snapshot, self).__init__(location=location, tags=tags, **kwargs)
self.managed_by = None
self.sku = sku
self.time_created = None
self.os_type = os_type
self.hyper_v_generation = hyper_v_generation
self.creation_data = creation_data
self.disk_size_gb = disk_size_gb
self.disk_size_bytes = None
self.unique_id = None
self.encryption_settings_collection = encryption_settings_collection
self.provisioning_state = None
self.incremental = incremental
self.encryption = encryption
self.network_access_policy = network_access_policy
self.disk_access_id = disk_access_id
class SnapshotList(msrest.serialization.Model):
"""The List Snapshots operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of snapshots.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.Snapshot]
:param next_link: The uri to fetch the next page of snapshots. Call ListNext() with this to
fetch the next page of snapshots.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Snapshot]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Snapshot"],
next_link: Optional[str] = None,
**kwargs
):
super(SnapshotList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class SnapshotSku(msrest.serialization.Model):
"""The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
Variables are only populated by the server, and will be ignored when sending a request.
:param name: The sku name. Possible values include: "Standard_LRS", "Premium_LRS",
"Standard_ZRS".
:type name: str or ~azure.mgmt.compute.v2020_05_01.models.SnapshotStorageAccountTypes
:ivar tier: The sku tier.
:vartype tier: str
"""
_validation = {
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[Union[str, "SnapshotStorageAccountTypes"]] = None,
**kwargs
):
super(SnapshotSku, self).__init__(**kwargs)
self.name = name
self.tier = None
class SnapshotUpdate(msrest.serialization.Model):
"""Snapshot update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
    :param sku: The snapshot's sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
:type sku: ~azure.mgmt.compute.v2020_05_01.models.SnapshotSku
:param os_type: the Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2020_05_01.models.OperatingSystemTypes
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
    :param encryption_settings_collection: Encryption settings collection used by Azure Disk
    Encryption; it can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsCollection
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2020_05_01.models.Encryption
:param network_access_policy: Policy for accessing the disk via network. Possible values
include: "AllowAll", "AllowPrivate", "DenyAll".
:type network_access_policy: str or ~azure.mgmt.compute.v2020_05_01.models.NetworkAccessPolicy
:param disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
:type disk_access_id: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'SnapshotSku'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'},
'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
sku: Optional["SnapshotSku"] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
encryption: Optional["Encryption"] = None,
network_access_policy: Optional[Union[str, "NetworkAccessPolicy"]] = None,
disk_access_id: Optional[str] = None,
**kwargs
):
super(SnapshotUpdate, self).__init__(**kwargs)
self.tags = tags
self.sku = sku
self.os_type = os_type
self.disk_size_gb = disk_size_gb
self.encryption_settings_collection = encryption_settings_collection
self.encryption = encryption
self.network_access_policy = network_access_policy
self.disk_access_id = disk_access_id
class SourceVault(msrest.serialization.Model):
"""The vault id is an Azure Resource Manager Resource id in the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}.
:param id: Resource Id.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(SourceVault, self).__init__(**kwargs)
self.id = id
| mit | 8,620,681,322,227,704,000 | 39.012987 | 188 | 0.638676 | false |
keen99/SickRage | sickbeard/dailysearcher.py | 1 | 4533 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import threading
import traceback
import sickbeard
from sickbeard import logger
from sickbeard import db
from sickbeard import common
from sickbeard import helpers
from sickbeard import exceptions
from sickbeard import network_timezones
from sickbeard.exceptions import ex
from sickbeard.common import SKIPPED
from common import Quality, qualityPresetStrings, statusStrings
class DailySearcher():
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
def run(self, force=False):
if self.amActive:
return
self.amActive = True
logger.log(u"Searching for new released episodes ...")
if not network_timezones.network_dict:
network_timezones.update_network_dict()
if network_timezones.network_dict:
curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
else:
curDate = (datetime.date.today() + datetime.timedelta(days=2)).toordinal()
curTime = datetime.datetime.now(network_timezones.sb_timezone)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND (airdate <= ? and airdate > 1)",
[common.UNAIRED, curDate])
sql_l = []
show = None
for sqlEp in sqlResults:
try:
if not show or int(sqlEp["showid"]) != show.indexerid:
show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"]))
# for when there is orphaned series in the database but not loaded into our showlist
if not show or show.paused:
continue
except exceptions.MultipleShowObjectsException:
logger.log(u"ERROR: expected to find a single show matching " + str(sqlEp['showid']))
continue
try:
end_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs,
show.network) + datetime.timedelta(
minutes=helpers.tryInt(show.runtime, 60))
                # filter out any episodes that haven't aired yet
if end_time > curTime:
continue
except:
                # if an error occurred assume the episode hasn't aired yet
continue
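            # Note: UpdateWantedList is assigned below but never read in this
            # version; the Trakt wanted list is refreshed unconditionally after
            # the loop.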
UpdateWantedList = 0
ep = show.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"]))
with ep.lock:
if ep.season == 0:
logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to SKIPPED because is a special season")
ep.status = common.SKIPPED
elif sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT:
ep.status = common.SKIPPED
UpdateWantedList = 1
else:
logger.log(u"New episode %s airs today, setting to default episode status for this show: %s" % (ep.prettyName(), common.statusStrings[ep.show.default_ep_status]))
ep.status = ep.show.default_ep_status
sql_l.append(ep.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
else:
logger.log(u"No new released episodes found ...")
sickbeard.traktRollingScheduler.action.updateWantedList()
# queue episode for daily search
dailysearch_queue_item = sickbeard.search_queue.DailySearchQueueItem()
sickbeard.searchQueueScheduler.action.add_item(dailysearch_queue_item)
self.amActive = False
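# Note: DailySearcher is presumably constructed once by sickbeard's scheduler,
# which calls run() periodically; the amActive flag above turns overlapping
# calls into no-ops while a previous search is still running.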
| gpl-3.0 | -3,312,056,714,057,756,700 | 37.415254 | 182 | 0.62056 | false |
trackmastersteve/alienfx | alienfx/core/controller_m17xr4.py | 1 | 5637 | #
# controller_m17xr4.py
#
# Copyright (C) 2013-2014 Ashwin Menon <[email protected]>
# Copyright (C) 2015-2021 Track Master Steve <[email protected]>
#
# Alienfx is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Alienfx is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with alienfx. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
""" Specialization of the AlienFxController class for the M17xR4 controller.
This module provides the following classes:
AlienFXControllerM17xR4 : M17xR4 controller
"""
import alienfx.core.controller as alienfx_controller
class AlienFXControllerM17xR4(alienfx_controller.AlienFXController):
""" Specialization of the AlienFxController class for the M17xR4 controller.
"""
# Speed capabilities. The higher the number, the slower the speed of
# blink/morph actions. The min speed is selected by trial and error as
# the lowest value that will not result in strange blink/morph behaviour.
DEFAULT_SPEED = 75
MIN_SPEED = 1
# Zone codes
LEFT_KEYBOARD = 0x0008 # Code OK
MIDDLE_LEFT_KEYBOARD = 0x0004 # Code OK
MIDDLE_RIGHT_KEYBOARD = 0x0002 # Code OK
RIGHT_KEYBOARD = 0x0001 # Code OK
    # 0x000F - Keyboard: all fields (0x1+0x2+0x4+0x8=0xF). You may have a look at reverse-engineering-knowledgebase.md
RIGHT_SPEAKER = 0x0800 # Code OK, Bottom - Right light bar
LEFT_SPEAKER = 0x0400 # Code OK, Bottom - Left light bar
LEFT_DISPLAY = 0x1000 # Code OK, Display - Left light bar
RIGHT_DISPLAY = 0x2000 # Code OK, Display - Right light bar
ALIEN_HEAD = 0x0020 # Code OK
LOGO = 0x0040 # Code OK. Alienware-logo below screen.
    # 0x0060 seems to be alien head and logo (0x20+0x40=0x60). You may have a look at reverse-engineering-knowledgebase.md
# Touchpad:
# Seems OK. You may need to set touchpad-lightning to always on in BIOS for this to work,
# as the on-touch-event seems to be not recognized correctly
TOUCH_PAD = 0x0080 # Code OK. Have a look at your BIOS settings.
    MEDIA_BAR = 0x4000 # Seems OK, assuming the media bar is actually the macro-key bar
POWER_BUTTON = 0x0100 # Seems OK. Caution: S1 (Boot) conflicts with settings for other states...
# HDD_LEDS = ??? # Inactive: Device has no hdd indicator
# Reset codes
RESET_ALL_LIGHTS_OFF = 3
RESET_ALL_LIGHTS_ON = 4
# State codes
    BOOT = 1 # Some zones can apparently only be defined in the Boot state and have no effect in higher states
AC_SLEEP = 2
AC_CHARGED = 5
AC_CHARGING = 6
BATTERY_SLEEP = 7
BATTERY_ON = 8
BATTERY_CRITICAL = 9
#Controller Type
# Defines the controllertype:
# 1 = old pre Alienware 17R4 (4 bits per color)
# 2 = AW17R4 and probably others, which are using 8 bits per color
MYCONTROLLERREV = 2
def __init__(self):
# For new controller-defintions controller-revision should be provided as it defaults to 1!
# Wrong revision might result in packet errors 32 and 75 (Overflow and Pipeoverflow)
alienfx_controller.AlienFXController.__init__(self, self.MYCONTROLLERREV)
self.name = "Alienware M17xR4"
# USB VID and PID
self.vendor_id = 0x187c
self.product_id = 0x0530
# map the zone names to their codes
self.zone_map = {
self.ZONE_LEFT_KEYBOARD: self.LEFT_KEYBOARD,
self.ZONE_MIDDLE_LEFT_KEYBOARD: self.MIDDLE_LEFT_KEYBOARD,
self.ZONE_MIDDLE_RIGHT_KEYBOARD: self.MIDDLE_RIGHT_KEYBOARD,
self.ZONE_RIGHT_KEYBOARD: self.RIGHT_KEYBOARD,
self.ZONE_RIGHT_SPEAKER: self.RIGHT_SPEAKER,
self.ZONE_LEFT_SPEAKER: self.LEFT_SPEAKER,
self.ZONE_ALIEN_HEAD: self.ALIEN_HEAD,
self.ZONE_LOGO: self.LOGO,
self.ZONE_TOUCH_PAD: self.TOUCH_PAD,
self.ZONE_MEDIA_BAR: self.MEDIA_BAR,
self.ZONE_POWER_BUTTON: self.POWER_BUTTON,
self.ZONE_LEFT_DISPLAY: self.LEFT_DISPLAY,
self.ZONE_RIGHT_DISPLAY: self.RIGHT_DISPLAY
# self.ZONE_HDD_LEDS: self.HDD_LEDS, # Not used, as de AW17R4 does not have an HDD indicator
}
# zones that have special behaviour in the different power states
self.power_zones = [
self.ZONE_POWER_BUTTON # ,
# self.ZONE_HDD_LEDS
]
# map the reset names to their codes
self.reset_types = {
self.RESET_ALL_LIGHTS_OFF: "all-lights-off",
self.RESET_ALL_LIGHTS_ON: "all-lights-on"
}
# map the state names to their codes
self.state_map = {
self.STATE_BOOT: self.BOOT,
self.STATE_AC_SLEEP: self.AC_SLEEP,
self.STATE_AC_CHARGED: self.AC_CHARGED,
self.STATE_AC_CHARGING: self.AC_CHARGING,
self.STATE_BATTERY_SLEEP: self.BATTERY_SLEEP,
self.STATE_BATTERY_ON: self.BATTERY_ON,
self.STATE_BATTERY_CRITICAL: self.BATTERY_CRITICAL
}
alienfx_controller.AlienFXController.supported_controllers.append(
AlienFXControllerM17xR4())
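# Usage sketch (the alienfx tooling normally instantiates and drives this class;
# the direct lookup below merely illustrates the maps defined above):
#
#   ctrl = AlienFXControllerM17xR4()
#   ctrl.zone_map[ctrl.ZONE_LOGO]   # -> 0x0040, the Alienware logo zone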
| gpl-3.0 | -5,833,071,122,726,631,000 | 38.697183 | 121 | 0.665247 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/site_seal_request.py | 1 | 1069 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SiteSealRequest(Model):
"""Site seal request.
:param light_theme: If <code>true</code> use the light color theme for
site seal; otherwise, use the default color theme.
:type light_theme: bool
:param locale: Locale of site seal.
:type locale: str
"""
_attribute_map = {
'light_theme': {'key': 'lightTheme', 'type': 'bool'},
'locale': {'key': 'locale', 'type': 'str'},
}
def __init__(self, light_theme=None, locale=None):
self.light_theme = light_theme
self.locale = locale
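# Illustrative usage sketch (not part of the generated code):
#
#   seal_request = SiteSealRequest(light_theme=True, locale='en-US')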
| mit | 4,656,552,938,915,643,000 | 32.40625 | 76 | 0.571562 | false |
brandicted/nefertari | nefertari/renderers.py | 2 | 6370 | import json
import logging
from datetime import date, datetime
from nefertari import wrappers
from nefertari.utils import get_json_encoder
from nefertari.json_httpexceptions import JHTTPOk, JHTTPCreated
from nefertari.events import trigger_after_events
log = logging.getLogger(__name__)
class _JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime, date)):
            return obj.strftime("%Y-%m-%dT%H:%M:%SZ")  # ISO 8601 format
try:
return super(_JSONEncoder, self).default(obj)
except TypeError:
return str(obj) # fallback to str
class JsonRendererFactory(object):
def __init__(self, info):
""" Constructor: info will be an object having the
following attributes: name (the renderer name), package
(the package that was 'current' at the time the
renderer was registered), type (the renderer type
name), registry (the current application registry) and
settings (the deployment settings dictionary). """
pass
def _set_content_type(self, system):
""" Set response content type """
request = system.get('request')
if request:
response = request.response
ct = response.content_type
if ct == response.default_content_type:
response.content_type = 'application/json'
def _render_response(self, value, system):
""" Render a response """
view = system['view']
enc_class = getattr(view, '_json_encoder', None)
if enc_class is None:
enc_class = get_json_encoder()
return json.dumps(value, cls=enc_class)
def __call__(self, value, system):
""" Call the renderer implementation with the value
and the system value passed in as arguments and return
the result (a string or unicode object). The value is
the return value of a view. The system value is a
dictionary containing available system values
(e.g. view, context, and request).
"""
self._set_content_type(system)
# run after_calls on the value before jsonifying
value = self.run_after_calls(value, system)
value = self._trigger_events(value, system)
return self._render_response(value, system)
def _trigger_events(self, value, system):
view_obj = system['view'](system['context'], system['request'])
view_obj._response = value
evt = trigger_after_events(view_obj)
return evt.response
def run_after_calls(self, value, system):
request = system.get('request')
if request and hasattr(request, 'action'):
if request.action in ['index', 'show']:
value = wrappers.wrap_in_dict(request)(result=value)
return value
class DefaultResponseRendererMixin(object):
""" Renderer mixin that generates responses for all create/update/delete
view methods.
"""
def _get_common_kwargs(self, system):
""" Get kwargs common for all methods. """
enc_class = getattr(system['view'], '_json_encoder', None)
if enc_class is None:
enc_class = get_json_encoder()
return {
'request': system['request'],
'encoder': enc_class,
}
def _get_create_update_kwargs(self, value, common_kw):
""" Get kwargs common to create, update, replace. """
kw = common_kw.copy()
kw['body'] = value
if '_self' in value:
kw['headers'] = [('Location', value['_self'])]
return kw
def render_create(self, value, system, common_kw):
""" Render response for view `create` method (collection POST) """
kw = self._get_create_update_kwargs(value, common_kw)
return JHTTPCreated(**kw)
def render_update(self, value, system, common_kw):
""" Render response for view `update` method (item PATCH) """
kw = self._get_create_update_kwargs(value, common_kw)
return JHTTPOk('Updated', **kw)
def render_replace(self, *args, **kwargs):
""" Render response for view `replace` method (item PUT) """
return self.render_update(*args, **kwargs)
def render_delete(self, value, system, common_kw):
""" Render response for view `delete` method (item DELETE) """
return JHTTPOk('Deleted', **common_kw.copy())
def render_delete_many(self, value, system, common_kw):
""" Render response for view `delete_many` method (collection DELETE)
"""
if isinstance(value, dict):
return JHTTPOk(extra=value)
msg = 'Deleted {} {}(s) objects'.format(
value, system['view'].Model.__name__)
return JHTTPOk(msg, **common_kw.copy())
def render_update_many(self, value, system, common_kw):
""" Render response for view `update_many` method
(collection PUT/PATCH)
"""
msg = 'Updated {} {}(s) objects'.format(
value, system['view'].Model.__name__)
return JHTTPOk(msg, **common_kw.copy())
def _render_response(self, value, system):
""" Handle response rendering.
Calls mixin methods according to request.action value.
"""
super_call = super(DefaultResponseRendererMixin, self)._render_response
try:
method_name = 'render_{}'.format(system['request'].action)
except (KeyError, AttributeError):
return super_call(value, system)
method = getattr(self, method_name, None)
if method is not None:
common_kw = self._get_common_kwargs(system)
response = method(value, system, common_kw)
system['request'].response = response
return
return super_call(value, system)
class NefertariJsonRendererFactory(DefaultResponseRendererMixin,
JsonRendererFactory):
""" Special json renderer which will apply all after_calls(filters)
to the result.
"""
def run_after_calls(self, value, system):
request = system.get('request')
if request and hasattr(request, 'action'):
after_calls = getattr(request, 'filters', {})
for call in after_calls.get(request.action, []):
value = call(**dict(request=request, result=value))
return value
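# Registration sketch: in a Pyramid application this factory is exposed through
# config.add_renderer(); the renderer name below is an assumption -- nefertari
# performs the actual wiring in its own includeme.
#
#   config.add_renderer('nefertari_json', NefertariJsonRendererFactory)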
| apache-2.0 | -4,746,659,759,339,364,000 | 36.692308 | 79 | 0.610204 | false |
redondomarco/useradm | src/models/gestionssl.py | 1 | 1494 | def obtener_certificado(usuario, perfil):
"""devuelvo certificado, si no existe lo creo en seguinf"""
env.user = myconf.take('datos.seguinf_user')
env.warn_only = True
seguinf = FabricSupport()
comando='sudo ls -la /root/Clientes_ssl/'+str(usuario)+'-'+str(perfil)+'.p12'
seguinf.run(myconf.take('datos.seguinf_srv'),22,comando)
if "No existe el fichero o el directorio" in seguinf.result:
log(str(seguinf.result))
        # create the certificate
comando='sudo /usr/local/seguridad/bin/genero-cert '+str(usuario)+' '+str(perfil)
seguinf.run(myconf.take('datos.seguinf_srv'),22,comando)
    # fetch the certificate
seguinf.file_get(myconf.take('datos.seguinf_srv'),22,"/root/Clientes_ssl/","/tmp/",str(usuario)+"-"+str(perfil)+".p12")
    # read the certificate
with open("/tmp/"+str(usuario)+"-"+str(perfil)+".p12", 'rb') as f:
cert_p12 = f.read()
return cert_p12
def info_certificado(certificado):
from OpenSSL.crypto import load_pkcs12, FILETYPE_PEM, FILETYPE_ASN1
a = certificado
p = load_pkcs12(a, '')
certificate = p.get_certificate()
private_key = p.get_privatekey()
fields = certificate.get_subject().get_components()
resultado={}
for i in fields:
resultado[i[0]]=i[1]
return resultado
# to inspect the result of obtener_certificado
#from OpenSSL.crypto import load_pkcs12, FILETYPE_PEM, FILETYPE_ASN1
#p = load_pkcs12(a, '')
#certificate = p.get_certificate()
#private_key = p.get_privatekey()
#fields = certificate.get_subject().get_components()
#print(fields)
| gpl-3.0 | 3,485,720,391,723,372,000 | 37.307692 | 120 | 0.718206 | false |
pizzathief/scipy | scipy/linalg/tests/test_decomp_cossin.py | 1 | 5753 | import pytest
import numpy as np
from numpy.random import seed
from numpy.testing import assert_allclose
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
from scipy.linalg import cossin, get_lapack_funcs
REAL_DTYPES = (np.float32, np.float64)
COMPLEX_DTYPES = (np.complex64, np.complex128)
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
@pytest.mark.parametrize('dtype_', DTYPES)
@pytest.mark.parametrize('m, p, q',
[
(2, 1, 1),
(3, 2, 1),
(3, 1, 2),
(4, 2, 2),
(4, 1, 2),
(40, 12, 20),
(40, 30, 1),
(40, 1, 30),
(100, 50, 1),
(100, 50, 50),
])
@pytest.mark.parametrize('swap_sign', [True, False])
def test_cossin(dtype_, m, p, q, swap_sign):
seed(1234)
if dtype_ in COMPLEX_DTYPES:
x = np.array(unitary_group.rvs(m), dtype=dtype_)
else:
x = np.array(ortho_group.rvs(m), dtype=dtype_)
u, cs, vh = cossin(x, p, q,
swap_sign=swap_sign)
assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
assert u.dtype == dtype_
    # Test for float32 or float64
assert cs.dtype == np.real(u).dtype
assert vh.dtype == dtype_
u, cs, vh = cossin([x[:p, :q], x[:p, q:], x[p:, :q], x[p:, q:]],
swap_sign=swap_sign)
assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
assert u.dtype == dtype_
assert cs.dtype == np.real(u).dtype
assert vh.dtype == dtype_
_, cs2, vh2 = cossin(x, p, q,
compute_u=False,
swap_sign=swap_sign)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(vh, vh2, rtol=0., atol=10*np.finfo(dtype_).eps)
u2, cs2, _ = cossin(x, p, q,
compute_vh=False,
swap_sign=swap_sign)
assert_allclose(u, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
_, cs2, _ = cossin(x, p, q,
compute_u=False,
compute_vh=False,
swap_sign=swap_sign)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
def test_cossin_mixed_types():
seed(1234)
    x = np.array(ortho_group.rvs(4), dtype=float)
u, cs, vh = cossin([x[:2, :2],
np.array(x[:2, 2:], dtype=np.complex128),
x[2:, :2],
x[2:, 2:]])
assert u.dtype == np.complex128
assert cs.dtype == np.float64
assert vh.dtype == np.complex128
assert_allclose(x, u @ cs @ vh, rtol=0.,
atol=1e4 * np.finfo(np.complex128).eps)
def test_cossin_error_incorrect_subblocks():
with pytest.raises(ValueError, match="be due to missing p, q arguments."):
cossin(([1, 2], [3, 4, 5], [6, 7], [8, 9, 10]))
def test_cossin_error_empty_subblocks():
with pytest.raises(ValueError, match="x11.*empty"):
cossin(([], [], [], []))
with pytest.raises(ValueError, match="x12.*empty"):
cossin(([1, 2], [], [6, 7], [8, 9, 10]))
with pytest.raises(ValueError, match="x21.*empty"):
cossin(([1, 2], [3, 4, 5], [], [8, 9, 10]))
with pytest.raises(ValueError, match="x22.*empty"):
cossin(([1, 2], [3, 4, 5], [2], []))
def test_cossin_error_missing_partitioning():
with pytest.raises(ValueError, match=".*exactly four arrays.* got 2"):
cossin(unitary_group.rvs(2))
with pytest.raises(ValueError, match=".*might be due to missing p, q"):
cossin(unitary_group.rvs(4))
def test_cossin_error_non_iterable():
with pytest.raises(ValueError, match="containing the subblocks of X"):
cossin(12j)
def test_cossin_error_non_square():
with pytest.raises(ValueError, match="only supports square"):
cossin(np.array([[1, 2]]), 1, 1)
def test_cossin_error_partitioning():
    x = np.array(ortho_group.rvs(4), dtype=float)
with pytest.raises(ValueError, match="invalid p=0.*0<p<4.*"):
cossin(x, 0, 1)
with pytest.raises(ValueError, match="invalid p=4.*0<p<4.*"):
cossin(x, 4, 1)
with pytest.raises(ValueError, match="invalid q=-2.*0<q<4.*"):
cossin(x, 1, -2)
with pytest.raises(ValueError, match="invalid q=5.*0<q<4.*"):
cossin(x, 1, 5)
@pytest.mark.parametrize("dtype_", DTYPES)
def test_cossin_separate(dtype_):
m, p, q = 250, 80, 170
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
X = np.array(X, dtype=dtype_)
    drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), [X])
lwval = _compute_lwork(dlw, m, p, q)
lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
'lrwork'],
lwval))
*_, theta, u1, u2, v1t, v2t, _ = \
drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
(u1_2, u2_2), theta2, (v1t_2, v2t_2) = cossin(X, p, q, separate=True)
assert_allclose(u1_2, u1, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(u2_2, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(v1t_2, v1t, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(v2t_2, v2t, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(theta2, theta, rtol=0., atol=10*np.finfo(dtype_).eps)
| bsd-3-clause | -8,984,024,263,995,868,000 | 36.357143 | 78 | 0.530506 | false |
ViciusChile/Scraper_Dealer | Metacritic.py | 1 | 7087 | from Scrapers.tools import tools
class MetacriticInfo:
"""Model for each elements"""
def __init__(self):
self.name = None
self.platform = None
self.developer = None
self.publisher = None
self.esrb = None
self.release = None
self.tags = None
self.metascore = None
self.official_site = None
self.description = None
self.num_players = None
self.sound = None
self.connectivity = None
self.resolution = None
self.num_online = None
self.customization = None
self.image_mini = None
self.url = None
self.url_amazon = None
class Metacritic:
"""Scrapping for www.metacritic.com"""
def __init__(self):
pass
@staticmethod
def scraper_dealer(url):
"""Get url index from one platform an letter
:param url: to scraping
http://www.metacritic.com/browse/games/title/ps4 /a
"""
url_pages = []
http = tools.get_html(url)
html = http[2]
http_code = http[1]
if html is not None:
pages = html.cssselect('#main > div.module.filter.alpha_filter > div.page_nav > div > div.pages > ul > li')
if pages:
q = len(pages)
else:
q = 1
for i in range(0, q):
url_pages.append('{0}?view=condensed&page={1}'.format(url, i))
return url_pages, http_code
@staticmethod
def scraper_links(url):
"""Get url index from one platform an letter
:param url: page with many links
:return: list of links urls
"""
urls = []
http = tools.get_html(url)
html = http[2]
http_code = http[1]
if html is not None:
links = html.cssselect(
'#main > div.module.filter.alpha_filter > div > div.body > div.body_wrap > div > ol > li > div > div.basic_stat.product_title > a')
if links:
for l in links:
urls.append(l.get('href') + '/details')
return urls, http_code
@staticmethod
def scraper_info(url):
"""Get all information of a game
:param url: game link
:return: class with all info
"""
http = tools.get_html(url)
page = http[2]
http_code = http[1]
product = MetacriticInfo()
name = page.cssselect('#main > div.content_head.product_content_head.game_content_head > div.product_title > a')
if not name:
name = page.cssselect('#main > div.content_head.product_content_head.game_content_head > h1 > a')
if name:
product.name = name[0].text_content().strip()
platform = page.cssselect('#main > div.content_head.product_content_head.game_content_head > div.product_title > span > a')
if not platform:
platform = page.cssselect('#main > div.content_head.product_content_head.game_content_head > h1 > span > a')
if platform:
platform = platform[0].text_content().strip()
product.platform = tools.clear_platform(platform).upper()
publisher = page.cssselect(
'#main > div.content_head.product_content_head.game_content_head > div.product_data > ul > li.summary_detail.publisher > span.data > a')
if publisher:
product.publisher = publisher[0].text_content().strip()
release = page.cssselect(
'#main > div.content_head.product_content_head.game_content_head > div.product_data > ul > li.summary_detail.release_data > span.data')
if release:
product.release = release[0].text_content().strip()
metascore = page.cssselect(
'#main > div.module.product_data > div > div.summary_wrap > div.section.product_scores > div.details.main_details > div > div > a > div > span')
if metascore:
product.metascore = metascore[0].text_content().strip()
product_description = page.cssselect(
'#main > div.module.product_data > div > div.summary_wrap > div.section.product_details > div > span.data')
if product_description:
product.description = product_description[0].text_content()
og_image = page.cssselect('meta[name="og:image"]')
if og_image:
product.image_mini = og_image[0].get('content')
product_details = page.cssselect('#main > div.product_details > table')
if product_details:
for i in product_details:
for e in i:
th = e.cssselect('th')
td = e.cssselect("td")
th_val = th[0].text_content().replace(":", "").strip()
td_val = td[0].text_content().strip()
if th_val == "Rating":
product.esrb = td_val
elif th_val == "Official Site":
product.official_site = td_val
elif th_val == "Developer":
product.developer = td_val
elif th_val == "Genre(s)":
product.tags = td_val
elif th_val == "Number of Players":
product.num_players = td_val
elif th_val == "Sound":
product.sound = td_val
elif th_val == "Connectivity":
product.connectivity = td_val
elif th_val == "Resolution":
product.resolution = td_val
elif th_val == "Number of Online Players":
product.num_online = td_val
elif th_val == "Customization":
product.customization = td_val
product_url = page.cssselect('#main > div.content_head.product_content_head.game_content_head > div.product_title > a')
if product_url:
product.url = product_url[0].get('href')
#url_amazon = page.cssselect('#main > div.module.product_data > div > div.summary_wrap > div.section.product_details > div.amazon_wrapper > a')
url_amazon = page.cssselect('#main > div.module.product_data > div > div.summary_wrap > div.section.product_details > div.esite_list > div.esite_items > div.esite_btn_wrapper > div.esite_btn > table > tr > td.esite_img_wrapper > a')
#print('url_amazon', url_amazon)
if url_amazon:
product.url_amazon = url_amazon[0].attrib['href']
return product, http_code
# ------------------------------------------------------------------------------ #
#from pprint import pprint
#pprint(Metacritic.scraper_pages('pc', letter))
#pprint(Metacritic.scraper_links('http://www.metacritic.com/browse/games/title/pc/u?view=condensed&page=1'))
#Metacritic.scraper_info('http://www.metacritic.com/game/playstation-4/fallout-4/details')
#platforms = ['ps4', 'xboxone', 'ps3', 'xbox360', 'pc', 'wii-u', '3ds', 'vita']
#letters = list('#' + string.ascii_lowercase)
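
# A minimal end-to-end sketch (assumed workflow chaining the three static
# methods above; needs network access, the site markup this scraper was
# written against, and assumes the scraped hrefs are site-relative):
if __name__ == '__main__':
    base = 'http://www.metacritic.com'
    pages, _ = Metacritic.scraper_dealer(base + '/browse/games/title/ps4/a')
    for page in pages[:1]:
        links, _ = Metacritic.scraper_links(page)
        for link in links[:3]:
            info, _ = Metacritic.scraper_info(base + link)
            print('{0} [{1}] metascore={2}'.format(
                info.name, info.platform, info.metascore))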
| gpl-2.0 | 7,326,300,751,974,268,000 | 38.372222 | 240 | 0.550444 | false |
atwilc3000/buildroot-at91 | support/testing/infra/basetest.py | 1 | 2170 | import unittest
import os
import datetime
from infra.builder import Builder
from infra.emulator import Emulator
BASIC_TOOLCHAIN_CONFIG = \
"""
BR2_arm=y
BR2_TOOLCHAIN_EXTERNAL=y
BR2_TOOLCHAIN_EXTERNAL_CUSTOM=y
BR2_TOOLCHAIN_EXTERNAL_DOWNLOAD=y
BR2_TOOLCHAIN_EXTERNAL_URL="http://autobuild.buildroot.org/toolchains/tarballs/br-arm-full-2015.05-1190-g4a48479.tar.bz2"
BR2_TOOLCHAIN_EXTERNAL_GCC_4_7=y
BR2_TOOLCHAIN_EXTERNAL_HEADERS_3_10=y
BR2_TOOLCHAIN_EXTERNAL_LOCALE=y
# BR2_TOOLCHAIN_EXTERNAL_HAS_THREADS_DEBUG is not set
BR2_TOOLCHAIN_EXTERNAL_INET_RPC=y
BR2_TOOLCHAIN_EXTERNAL_CXX=y
"""
MINIMAL_CONFIG = \
"""
BR2_INIT_NONE=y
BR2_SYSTEM_BIN_SH_NONE=y
# BR2_PACKAGE_BUSYBOX is not set
# BR2_TARGET_ROOTFS_TAR is not set
"""
class BRTest(unittest.TestCase):
config = None
downloaddir = None
outputdir = None
logtofile = True
keepbuilds = False
jlevel = 0
timeout_multiplier = 1
def __init__(self, names):
super(BRTest, self).__init__(names)
self.testname = self.__class__.__name__
self.builddir = self.outputdir and os.path.join(self.outputdir, self.testname)
self.emulator = None
self.config = '\n'.join([line.lstrip() for line in
self.config.splitlines()]) + '\n'
self.config += "BR2_JLEVEL={}\n".format(self.jlevel)
def show_msg(self, msg):
print "{} {:40s} {}".format(datetime.datetime.now().strftime("%H:%M:%S"),
self.testname, msg)
def setUp(self):
self.show_msg("Starting")
self.b = Builder(self.config, self.builddir, self.logtofile)
if not self.keepbuilds:
self.b.delete()
if not self.b.is_finished():
self.show_msg("Building")
self.b.build()
self.show_msg("Building done")
self.emulator = Emulator(self.builddir, self.downloaddir,
self.logtofile, self.timeout_multiplier)
def tearDown(self):
self.show_msg("Cleaning up")
if self.emulator:
self.emulator.stop()
if self.b and not self.keepbuilds:
self.b.delete()
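
# A minimal usage sketch (hypothetical test case; assumes the Emulator
# helper exposes boot()/login()/run() as in Buildroot's support/testing
# framework, and that the config below produces images/rootfs.cpio):
class TestSampleRun(BRTest):
    config = BASIC_TOOLCHAIN_CONFIG + \
        """
        BR2_TARGET_ROOTFS_CPIO=y
        """

    def test_run(self):
        # Boot the freshly built rootfs in the emulator and run a command.
        img = os.path.join(self.builddir, "images", "rootfs.cpio")
        self.emulator.boot(arch="armv5", kernel="builtin",
                           options=["-initrd", img])
        self.emulator.login()
        _, exit_code = self.emulator.run("true")
        self.assertEqual(exit_code, 0)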
| gpl-2.0 | -4,470,137,271,963,074,600 | 29.138889 | 121 | 0.629954 | false |
pedrosacramento/inkscape-animation | spritesheet.py | 1 | 1674 | # This class handles sprite sheets.
# This was taken from
# www.scriptefun.com/transcript-2-using-sprite-sheets-and-drawing-the-background
# I've added some code to fail if the file wasn't found.
# Note: when calling images_at, each rect has the format:
# (x, y, x + offset, y + offset)
import pygame
class spritesheet(object):
def __init__(self, filename):
pygame.display.set_caption("Inkscape Animation Preview")
try:
self.sheet = pygame.image.load(filename).convert()
except pygame.error, message:
print 'Unable to load spritesheet image:', filename
raise SystemExit, message
# Load a specific image from a specific rectangle
def image_at(self, rectangle, colorkey = None):
"Loads image from x,y,x+offset,y+offset"
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0, 0), rect)
if colorkey is not None:
            if colorkey == -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
# Load a whole bunch of images and return them as a list
def images_at(self, rects, colorkey = None):
"Loads multiple images, supply a list of coordinates"
return [self.image_at(rect, colorkey) for rect in rects]
# Load a whole strip of images
def load_strip(self, rect, image_count, colorkey = None):
"Loads a strip of images and returns them as a list"
tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3])
for x in range(image_count)]
return self.images_at(tups, colorkey)
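
# A minimal usage sketch (hypothetical file name and frame geometry; a
# display mode must exist before __init__ calls .convert()):
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    sheet = spritesheet('player.bmp')  # assumed 4-frame, 32x32 strip
    frames = sheet.load_strip((0, 0, 32, 32), 4, colorkey=-1)
    screen.blit(frames[0], (0, 0))
    pygame.display.flip()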
| mit | -8,077,948,598,029,545,000 | 43.052632 | 64 | 0.639785 | false |
gaurav/phylo2owl | tests/test_shacl.py | 1 | 1141 | #!/usr/bin/env python
"""test_shacl.py: Test generated ontologies against SHACL shapes."""
import os
import libshacl
import pytest
def test_execute_testShacl():
""" Can we execute testShacl at all? """
(rc, stdout, stderr) = libshacl.exec_testShacl(["--version"])
print stdout
print stderr
assert rc == 0
assert stdout.startswith("testShacl ")
def test_validate_shacl_against_nodeshape(path_owl):
""" Execute testShacl on every OWL file against NodeShape.ttl. """
path_shacl = path_owl[:-3] + "shacl.ttl"
libshacl.validateShacl("tests/shapes/NodeShape.ttl", path_owl)
def test_validate_shacl_against_custom_shacl(path_owl):
""" Execute testShacl on the corresponding shacl.ttl file, if one exists. """
path_shacl = path_owl[:-3] + "shacl.ttl"
if os.path.isfile(path_shacl):
print "Validating {0} against its custom SHACL file, {1}".format(path_owl, path_shacl)
libshacl.validateShacl(path_shacl, path_owl)
else:
pytest.skip("OWL file '{0}' doesn't have a custom SHACL file to test at '{1}'".format(
path_owl,
path_shacl
))
| mit | 1,449,120,676,683,983,000 | 30.694444 | 94 | 0.659071 | false |
coala/coala | coalib/processes/communication/LogMessage.py | 1 | 1633 | from datetime import datetime
from coalib.output.printers.LOG_LEVEL import LOG_LEVEL
class LogMessage:
def __init__(self,
log_level,
*messages,
delimiter=' ',
timestamp=None):
if log_level not in LOG_LEVEL.reverse:
raise ValueError('log_level has to be a valid LOG_LEVEL.')
str_messages = [str(message) for message in messages]
self.message = str(delimiter).join(str_messages).rstrip()
if self.message == '':
raise ValueError('Empty log messages are not allowed.')
self.log_level = log_level
self.timestamp = datetime.today() if timestamp is None else timestamp
def __str__(self):
log_level = LOG_LEVEL.reverse.get(self.log_level, 'ERROR')
return f'[{log_level}] {self.message}'
def __eq__(self, other):
return (isinstance(other, LogMessage) and
other.log_level == self.log_level and
other.message == self.message)
def __ne__(self, other):
return not self.__eq__(other)
def to_string_dict(self):
"""
Makes a dictionary which has all keys and values as strings and
contains all the data that the LogMessage has.
:return: Dictionary with keys and values as string.
"""
retval = {}
retval['message'] = str(self.message)
retval['timestamp'] = ('' if self.timestamp is None
else self.timestamp.isoformat())
retval['log_level'] = str(LOG_LEVEL.reverse.get(self.log_level, ''))
return retval
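
# A minimal usage sketch (assumes LOG_LEVEL provides the usual coala
# levels, such as LOG_LEVEL.DEBUG):
if __name__ == '__main__':
    msg = LogMessage(LOG_LEVEL.DEBUG, 'bear', 'started', delimiter=': ')
    print(msg)                   # -> [DEBUG] bear: started
    print(msg.to_string_dict())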
| agpl-3.0 | 8,628,628,486,573,394,000 | 31.66 | 77 | 0.578077 | false |
Larhard/tsp | tsp/solver.py | 1 | 2482 | # Copyright (c) 2015, Bartlomiej Puget <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Bartlomiej Puget nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL BARTLOMIEJ PUGET BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import queue
import tsp.utils
def solve(vertices, tactic=None):
tactic = tactic or tsp.utils.euclid_distance
todo = queue.PriorityQueue()
todo.put((0, [vertices[0]], ))
min_cost = None
min_path = None
while todo.qsize() > 0:
expected_cost, path = todo.get()
cost = tsp.utils.path_length(path)
if len(vertices) == len(path):
total_cost = cost + tsp.utils.euclid_distance(path[-1], path[0])
if min_cost is None or min_cost > total_cost:
min_cost = total_cost
min_path = path
else:
for v in vertices:
if v not in path:
new_cost = cost + tactic(path[-1], v, vertices=vertices,
path=path)
if min_cost is None or new_cost < min_cost:
todo.put((new_cost, path + [v]))
return min_path
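
# A minimal usage sketch (assumes tsp.utils.euclid_distance tolerates the
# extra keyword context that solve() passes to its tactic):
if __name__ == '__main__':
    square = [(0, 0), (2, 0), (2, 2), (0, 2)]
    print(solve(square))  # visiting order of minimal total length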
| bsd-3-clause | -8,710,665,200,263,397,000 | 39.688525 | 80 | 0.680097 | false |
abalakh/robottelo | robottelo/ui/navigator.py | 1 | 12312 | # -*- encoding: utf-8 -*-
"""Implements Navigator UI."""
from robottelo.ui.base import Base, UIError
from robottelo.ui.locators import menu_locators
class Navigator(Base):
"""Quickly navigate through menus and tabs."""
def menu_click(self, top_menu_locator, sub_menu_locator,
tertiary_menu_locator=None):
self.perform_action_chain_move(top_menu_locator)
if not tertiary_menu_locator:
self.click(sub_menu_locator)
else:
self.perform_action_chain_move(sub_menu_locator)
tertiary_element = self.wait_until_element(
tertiary_menu_locator)
self.browser.execute_script(
"arguments[0].click();",
tertiary_element,
)
self.wait_for_ajax()
def go_to_dashboard(self):
self.menu_click(
menu_locators['menu.monitor'], menu_locators['menu.dashboard'],
)
def go_to_content_dashboard(self):
self.menu_click(
menu_locators['menu.monitor'],
menu_locators['menu.content_dashboard'],
)
def go_to_reports(self):
self.menu_click(
menu_locators['menu.monitor'], menu_locators['menu.reports'],
)
def go_to_facts(self):
self.menu_click(
menu_locators['menu.monitor'], menu_locators['menu.facts'],
)
def go_to_statistics(self):
self.menu_click(
menu_locators['menu.monitor'], menu_locators['menu.statistics'],
)
def go_to_trends(self):
self.menu_click(
menu_locators['menu.monitor'], menu_locators['menu.trends'],
)
def go_to_audits(self):
self.menu_click(
menu_locators['menu.monitor'], menu_locators['menu.audits'],
)
def go_to_life_cycle_environments(self):
self.menu_click(
menu_locators['menu.content'],
menu_locators['menu.life_cycle_environments'],
)
def go_to_red_hat_subscriptions(self):
self.menu_click(
menu_locators['menu.content'],
menu_locators['menu.red_hat_subscriptions'],
)
def go_to_subscription_manager_applications(self):
self.menu_click(
menu_locators['menu.content'],
menu_locators['menu.subscription_manager_applications'],
)
def go_to_activation_keys(self):
self.menu_click(
menu_locators['menu.content'],
menu_locators['menu.activation_keys'],
)
def go_to_red_hat_repositories(self):
self.menu_click(
menu_locators['menu.content'],
menu_locators['menu.red_hat_repositories'],
)
def go_to_products(self):
self.menu_click(
menu_locators['menu.content'], menu_locators['menu.products'],
)
def go_to_gpg_keys(self):
self.menu_click(
menu_locators['menu.content'], menu_locators['menu.gpg_keys'],
)
def go_to_sync_status(self):
self.menu_click(
menu_locators['menu.content'], menu_locators['menu.sync_status'],
)
def go_to_sync_plans(self):
self.menu_click(
menu_locators['menu.content'], menu_locators['menu.sync_plans'],
)
def go_to_sync_schedules(self):
self.menu_click(
menu_locators['menu.content'],
menu_locators['menu.sync_schedules'],
)
def go_to_content_views(self):
self.menu_click(
menu_locators['menu.content'],
menu_locators['menu.content_views'],
)
def go_to_content_search(self):
self.menu_click(
menu_locators['menu.content'],
menu_locators['menu.content_search'],
)
def go_to_registries(self):
self.menu_click(
menu_locators['menu.containers'],
menu_locators['menu.registries'],
)
def go_to_hosts(self):
self.menu_click(
menu_locators['menu.hosts'], menu_locators['menu.all_hosts'],
)
def go_to_discovered_hosts(self):
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.discovered_hosts'],
)
def go_to_content_hosts(self):
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.content_hosts'],
)
def go_to_host_collections(self):
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.host_collections'],
)
def go_to_operating_systems(self):
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.operating_systems'],
)
def go_to_provisioning_templates(self):
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.provisioning_templates'],
)
def go_to_partition_tables(self):
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.partition_tables'],
)
def go_to_installation_media(self):
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.installation_media'],
)
def go_to_hardware_models(self):
self.menu_click(
menu_locators['menu.hosts'], menu_locators['menu.hardware_models'],
)
def go_to_architectures(self):
self.menu_click(
menu_locators['menu.hosts'], menu_locators['menu.architectures'],
)
def go_to_host_groups(self):
self.menu_click(
menu_locators['menu.configure'], menu_locators['menu.host_groups'],
)
def go_to_discovery_rules(self):
self.menu_click(
menu_locators['menu.configure'],
menu_locators['menu.discovery_rules'],
)
def go_to_global_parameters(self):
self.menu_click(
menu_locators['menu.configure'],
menu_locators['menu.global_parameters'],
)
def go_to_environments(self):
self.menu_click(
menu_locators['menu.configure'],
menu_locators['menu.environments'],
)
def go_to_puppet_classes(self):
self.menu_click(
menu_locators['menu.configure'],
menu_locators['menu.puppet_classes'],
)
def go_to_smart_variables(self):
self.menu_click(
menu_locators['menu.configure'],
menu_locators['menu.smart_variables'],
)
def go_to_config_groups(self):
self.menu_click(
menu_locators['menu.configure'],
menu_locators['menu.configure_groups']
)
def go_to_smart_proxies(self):
self.menu_click(
menu_locators['menu.infrastructure'],
menu_locators['menu.smart_proxies'],
)
def go_to_compute_resources(self):
self.menu_click(
menu_locators['menu.infrastructure'],
menu_locators['menu.compute_resources'],
)
def go_to_compute_profiles(self):
self.menu_click(
menu_locators['menu.infrastructure'],
menu_locators['menu.compute_profiles'],
)
def go_to_subnets(self):
self.menu_click(
menu_locators['menu.infrastructure'],
menu_locators['menu.subnets'],
)
def go_to_domains(self):
self.menu_click(
menu_locators['menu.infrastructure'],
menu_locators['menu.domains'],
)
def go_to_ldap_auth(self):
self.menu_click(
menu_locators['menu.administer'], menu_locators['menu.ldap_auth'],
)
def go_to_users(self):
self.menu_click(
menu_locators['menu.administer'], menu_locators['menu.users'],
)
def go_to_user_groups(self):
self.menu_click(
menu_locators['menu.administer'],
menu_locators['menu.user_groups'],
)
def go_to_roles(self):
self.menu_click(
menu_locators['menu.administer'], menu_locators['menu.roles'],
)
def go_to_bookmarks(self):
self.menu_click(
menu_locators['menu.administer'], menu_locators['menu.bookmarks'],
)
def go_to_settings(self):
self.menu_click(
menu_locators['menu.administer'], menu_locators['menu.settings'],
)
def go_to_about(self):
self.menu_click(
menu_locators['menu.administer'], menu_locators['menu.about'],
)
def go_to_sign_out(self):
self.menu_click(
menu_locators['menu.account'], menu_locators['menu.sign_out'],
)
def go_to_my_account(self):
self.menu_click(
menu_locators['menu.account'], menu_locators['menu.my_account'],
)
def go_to_org(self):
self.menu_click(
menu_locators['menu.any_context'], menu_locators['org.manage_org'],
)
def go_to_loc(self):
self.menu_click(
menu_locators['menu.any_context'], menu_locators['loc.manage_loc'],
)
def go_to_logout(self):
self.menu_click(
menu_locators['menu.account'], menu_locators['menu.sign_out'],
)
def go_to_insights_overview(self):
"""Navigates to Red Hat Access Insights Overview"""
self.menu_click(
menu_locators['menu.insights'],
menu_locators['insights.overview']
)
def go_to_insights_rules(self):
"""Navigates to Red Hat Access Insights Rules"""
self.menu_click(
menu_locators['menu.insights'],
menu_locators['insights.rules'],
)
def go_to_insights_systems(self):
""" Navigates to Red Hat Access Insights Systems"""
self.menu_click(
menu_locators['menu.insights'],
menu_locators['insights.systems'],
)
def go_to_insights_manage(self):
""" Navigates to Red Hat Access Insights Manage Systems"""
self.menu_click(
menu_locators['menu.insights'],
menu_locators['insights.manage'],
)
def go_to_oscap_policy(self):
""" Navigates to Oscap Policy"""
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.oscap_policy'],
)
def go_to_oscap_content(self):
"""Navigates to Oscap Content"""
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.oscap_content'],
)
def go_to_oscap_reports(self):
"""Navigates to Oscap Reports"""
self.menu_click(
menu_locators['menu.hosts'],
menu_locators['menu.oscap_reports'],
)
def go_to_select_org(self, org):
"""Selects the specified organization.
:param str org: The organization to select.
:return: Returns the organization.
:rtype: str
"""
strategy, value = menu_locators['org.select_org']
self.menu_click(
menu_locators['menu.any_context'],
menu_locators['org.nav_current_org'],
(strategy, value % org),
)
self.perform_action_chain_move(menu_locators['menu.current_text'])
if self.wait_until_element(
menu_locators['menu.fetch_org']).text != org:
raise UIError(
u'Could not select the organization: {0}'.format(org)
)
self.wait_for_ajax()
return org
def go_to_select_loc(self, loc):
"""Selects the specified location.
:param str loc: The location to select.
:return: Returns the location.
:rtype: str
"""
strategy, value = menu_locators['loc.select_loc']
self.menu_click(
menu_locators['menu.any_context'],
menu_locators['loc.nav_current_loc'],
(strategy, value % loc),
)
self.perform_action_chain_move(menu_locators['menu.current_text'])
if self.wait_until_element(
menu_locators['menu.fetch_loc']).text != loc:
raise UIError(
u'Could not select the location: {0}'.format(loc)
)
self.wait_for_ajax()
return loc
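
# A minimal usage sketch (hypothetical; assumes Base is constructed with a
# live Selenium browser session, as elsewhere in robottelo's UI layer):
#
#     nav = Navigator(browser)
#     nav.go_to_select_org('Default Organization')
#     nav.go_to_products()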
| gpl-3.0 | 7,753,651,298,242,059,000 | 28.52518 | 79 | 0.556936 | false |
OmeGak/indico | indico/modules/attachments/models/folders_test.py | 1 | 1890 | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.attachments import AttachmentFolder
def test_update_principal(dummy_user, dummy_event):
folder = AttachmentFolder(object=dummy_event, is_default=True)
assert not folder.acl_entries
# not changing anything -> shouldn't be added to acl
entry = folder.update_principal(dummy_user)
assert entry is None
assert not folder.acl_entries
# adding user with read access -> new acl entry since the user isn't in there yet
entry = initial_entry = folder.update_principal(dummy_user, read_access=True)
assert folder.acl_entries == {entry}
# not changing anything on existing principal -> shouldn't modify acl
entry = folder.update_principal(dummy_user)
assert entry is initial_entry
assert folder.acl_entries == {entry}
# granting permission which is already present -> shouldn't modify acl
entry = folder.update_principal(dummy_user, read_access=True)
assert entry is initial_entry
assert folder.acl_entries == {entry}
# removing read access -> acl entry is removed
entry = folder.update_principal(dummy_user, read_access=False)
assert entry is None
assert not folder.acl_entries
def test_remove_principal(dummy_user, dummy_event):
folder = AttachmentFolder(object=dummy_event, is_default=True)
assert not folder.acl_entries
entry = folder.update_principal(dummy_user, read_access=True)
assert folder.acl_entries == {entry}
folder.remove_principal(dummy_user)
assert not folder.acl_entries
# doesn't do anything but must not fail either
folder.remove_principal(dummy_user)
assert not folder.acl_entries
| mit | 6,373,904,373,456,727,000 | 40.086957 | 85 | 0.732275 | false |
galeone/dynamic-training-bench | dytb/evaluators/AutoencoderEvaluator.py | 1 | 1413 | #Copyright (C) 2017 Paolo Galeone <[email protected]>
#
#This Source Code Form is subject to the terms of the Mozilla Public
#License, v. 2.0. If a copy of the MPL was not distributed with this
#file, you can obtain one at http://mozilla.org/MPL/2.0/.
#Exhibit B is not attached; this software is compatible with the
#licenses expressed under Section 1.12 of the MPL v2.
""" Evaluate Autoencoding models """
from .Evaluator import Evaluator
class AutoencoderEvaluator(Evaluator):
"""AutoencoderEvaluator is the evaluation object for a Autoencoder model"""
@property
def metrics(self):
"""Returns a list of dict with keys:
{
"fn": function
"name": name
"positive_trend_sign": sign that we like to see when things go well
"model_selection": boolean, True if the metric has to be measured to select the model
"average": boolean, true if the metric should be computed as average over the batches.
If false the results over the batches are just added
"tensorboard": boolean. True if the metric is a scalar and can be logged in tensoboard
}
"""
return [{
"fn": self._model.loss,
"name": "error",
"positive_trend_sign": -1,
"model_selection": True,
"average": True,
"tensorboard": True,
}]
| mpl-2.0 | -1,403,732,228,782,369,000 | 38.25 | 98 | 0.625619 | false |
hsfzxjy/wisecitymbc | common/rest/decorators.py | 1 | 1087 | from django.utils.decorators import method_decorator
from django.core.cache import cache
from django.http import HttpResponse
def cache_view_func(func):
def wrapper(request, *args, **kwargs):
key = request.method + request.META['PATH_INFO'] + request.META['QUERY_STRING']
content = cache.get(key)
if content is None:
response = func(request, *args, **kwargs)
cache.set(key, response.rendered_content, 30)
return response
else:
return HttpResponse(content)
return wrapper
def cache_view_method(func):
def wrapper(self, request, *args, **kwargs):
key = request.method + request.META['PATH_INFO'] + request.META['QUERY_STRING']
content = cache.get(key)
if content is None:
response = func(self, request, *args, **kwargs)
self.finalize_response(request, response, *args, **kwargs)
cache.set(key, response.rendered_content, 30)
return response
else:
return HttpResponse(content)
return wrapper | gpl-2.0 | 774,915,620,516,630,300 | 31 | 87 | 0.624655 | false |
andrew-lundgren/gwpy | gwpy/signal/filter.py | 1 | 3049 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2016)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extensions to `scipy.signal.signaltools`.
"""
from numpy import (asarray, reshape)
from scipy.signal.signaltools import (sosfilt, sosfilt_zi)
from scipy.signal._arraytools import (axis_slice, axis_reverse, odd_ext,
even_ext, const_ext)
__author__ = 'Duncan Macleod <[email protected]>'
__all__ = ['sosfiltfilt']
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=0):
    """Apply a forward-backward (zero-phase) filter given in second-order
    sections (SOS) format, analogous to `scipy.signal.filtfilt`.
    """
    x = asarray(x)
# `method` is "pad"
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
edge = sos.shape[0] * 6
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = sosfilt_zi(sos)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
zix0 = reshape(zi * x0, (sos.shape[0], 2))
# Forward filter
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zix0)
# Backward filter
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
ziy0 = reshape(zi * y0, (sos.shape[0], 2))
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=ziy0)
# Reverse y
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
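
# A minimal usage sketch (standard scipy API): zero-phase low-pass a noisy
# sine with a 4th-order Butterworth filter in SOS form.
if __name__ == '__main__':
    import numpy
    from scipy.signal import butter
    t = numpy.linspace(0, 1, 500)
    x = numpy.sin(2 * numpy.pi * 5 * t) + 0.1 * numpy.random.randn(t.size)
    sos = butter(4, 0.125, output='sos')  # cutoff in Nyquist units
    y = sosfiltfilt(sos, x, padlen=50)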
| gpl-3.0 | 2,693,603,008,467,231,000 | 31.43617 | 77 | 0.610692 | false |
sharkykh/SickRage | sickbeard/providers/elitetorrent.py | 1 | 7522 | # coding=utf-8
# Author: CristianBB
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
import time
import traceback
import six
import sickbeard
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickbeard.common import cpu_presets
from sickrage.helper.common import try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class EliteTorrentProvider(TorrentProvider):
def __init__(self):
TorrentProvider.__init__(self, "EliteTorrent")
self.onlyspasearch = None
self.minseed = None
self.minleech = None
self.cache = tvcache.TVCache(self) # Only poll EliteTorrent every 20 minutes max
self.urls = {
'base_url': 'http://www.elitetorrent.net',
'search': 'http://www.elitetorrent.net/torrents.php'
}
self.url = self.urls['base_url']
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
results = []
lang_info = '' if not ep_obj or not ep_obj.show else ep_obj.show.lang
"""
Search query:
http://www.elitetorrent.net/torrents.php?cat=4&modo=listado&orden=fecha&pag=1&buscar=fringe
cat = 4 => Shows
modo = listado => display results mode
orden = fecha => order
buscar => Search show
pag = 1 => page number
"""
search_params = {
'cat': 4,
'modo': 'listado',
'orden': 'fecha',
'pag': 1,
'buscar': ''
}
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
# Only search if user conditions are true
if self.onlyspasearch and lang_info != 'es' and mode != 'RSS':
logger.log("Show info is not spanish, skipping provider search", logger.DEBUG)
continue
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_string = re.sub(r'S0*(\d*)E(\d*)', r'\1x\2', search_string)
search_params['buscar'] = search_string.strip() if mode != 'RSS' else ''
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
data = self.get_url(self.urls['search'], params=search_params, returns='text')
if not data:
continue
try:
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', class_='fichas-listado')
torrent_rows = torrent_table('tr') if torrent_table else []
if len(torrent_rows) < 2:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for row in torrent_rows[1:]:
try:
download_url = self.urls['base_url'] + row.find('a')['href']
"""
Trick for accents for this provider.
- data = self.get_url(self.urls['search'], params=search_params, returns='text') -
returns latin1 coded text and this makes that the title used for the search
and the title retrieved from the parsed web page doesn't match so I get
"No needed episodes found during backlog search for: XXXX"
This is not the best solution but it works.
First encode latin1 and then decode utf8 to remains six.text_type
"""
row_title = row.find('a', class_='nombre')['title']
title = self._processTitle(row_title.encode('latin-1').decode('utf8'))
seeders = try_int(row.find('td', class_='semillas').get_text(strip=True))
leechers = try_int(row.find('td', class_='clientes').get_text(strip=True))
#seeders are not well reported. Set 1 in case of 0
seeders = max(1, seeders)
# Provider does not provide size
size = -1
except (AttributeError, TypeError, KeyError, ValueError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
(title, seeders, leechers), logger.DEBUG)
continue
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
except Exception:
logger.log("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.WARNING)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
@staticmethod
def _processTitle(title):
# Quality, if no literal is defined it's HDTV
if 'calidad' not in title:
title += ' HDTV x264'
title = title.replace('(calidad baja)', 'HDTV x264')
title = title.replace('(Buena calidad)', '720p HDTV x264')
title = title.replace('(Alta calidad)', '720p HDTV x264')
title = title.replace('(calidad regular)', 'DVDrip x264')
title = title.replace('(calidad media)', 'DVDrip x264')
# Language, all results from this provider have spanish audio, we append it to title (avoid to download undesired torrents)
title += ' SPANISH AUDIO'
title += '-ELITETORRENT'
return title.strip()
provider = EliteTorrentProvider()
| gpl-3.0 | -8,951,345,537,618,904,000 | 39.659459 | 149 | 0.539484 | false |
mdklatt/serial-python | src/serial/core/sort.py | 1 | 3047 | """ Sorted input and output.
"""
from collections import deque
from operator import itemgetter
from .buffer import _ReaderBuffer
from .buffer import _WriterBuffer
__all__ = "SortReader", "SortWriter"
class _Sorter(object):
""" Abstract base class for SortReader and SortWriter.
"""
def __init__(self, key, group=None):
""" Initialize this object.
The key argument determines sort order and is either a single field
name, a sequence of names, or a key function that returns a key value.
The optional group argument is like the key argument but is used to
group records that are already partially sorted. Records will be sorted
within each group rather than as a single sequence. If the groups are
small relative to the total sequence length this can significantly
improve performance and memory usage.
"""
def keyfunc(key):
""" Create a key function. """
if not key or callable(key):
return key
if isinstance(key, str):
key = (key,)
return itemgetter(*key)
self._get_key = keyfunc(key)
self._get_group = keyfunc(group)
self._group = None
self._buffer = []
self._output = None # initialized by derived classes
return
def _queue(self, record):
""" Process each incoming record.
"""
if self._get_group:
group = self._get_group(record)
if group != self._group:
# This is a new group; process the previous group.
self._flush()
self._group = group
self._buffer.append(record)
return
def _flush(self):
""" Send sorted records to the output queue.
"""
if not self._buffer:
return
self._buffer.sort(key=self._get_key)
self._output = deque(self._buffer)
self._buffer = []
return
class SortReader(_Sorter, _ReaderBuffer):
""" Sort input from another reader.
"""
def __init__(self, reader, key, group=None):
""" Initialize this object.
"""
_Sorter.__init__(self, key, group)
_ReaderBuffer.__init__(self, reader)
return
def _uflow(self):
""" Handle an underflow condition.
This is called when the input reader is exhausted and there are no
records in the output queue.
"""
if not self._buffer:
# All data has been output.
raise StopIteration
self._flush()
return
class SortWriter(_Sorter, _WriterBuffer):
""" Sort output for another writer.
"""
def __init__(self, writer, key, group=None):
""" Initialize this object.
"""
_Sorter.__init__(self, key, group)
_WriterBuffer.__init__(self, writer)
return
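
# A minimal usage sketch (hypothetical reader/writer; any serial.core
# reader or writer of records with the named key fields should work):
#
#     reader = SortReader(raw_reader, key=('stid', 'time'), group='stid')
#     for record in reader:
#         ...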
| mit | -4,089,957,522,293,261,300 | 27.476636 | 80 | 0.54808 | false |
twitter/pycascading | examples/subassembly.py | 1 | 1199 | #
# Copyright 2011 Twitter, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Example demonstrating the use of predefined subassemblies.
Useful aggregators, subassemblies, pipes available in Cascading are imported
into PyCascading by native.py
"""
from pycascading.helpers import *
def main():
flow = Flow()
repeats = flow.source(Hfs(TextDelimited(Fields(['col1', 'col2']), ' ',
[String, Integer]),
'pycascading_data/repeats.txt'))
output = flow.tsv_sink('pycascading_data/out')
# This selects the distinct records considering all fields
repeats | native.unique(Fields.ALL) | output
flow.run()
| apache-2.0 | -7,289,176,963,484,494,000 | 33.257143 | 76 | 0.69558 | false |
jdeguire/pjcontroller | software/updatepage.py | 1 | 4410 | #! /usr/bin/env python
#
# Copyright 2011-2013 Jesse DeGuire
#
# This file is part of Projector Controller.
#
# Projector Controller is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Projector Controller is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with Projector Controller. If not, see <http://www.gnu.org/licenses/>
"""
File: updatepage.py
Author: Jesse DeGuire
Contains the UpdatePage class.
"""
import os
import hashlib
from PySide import QtCore
from PySide.QtCore import *
from PySide.QtGui import *
from connmanager import ConnectionManager
class UpdatePage(QDialog):
"""The page used for performing firmware updates to the device.
"""
# new signals have to be declared out here, something the docs aren't very explicit about
updatestartclicked = QtCore.Signal(str)
def __init__(self, connmgr):
QDialog.__init__(self)
# widgets in the dialog box
self.fileline = QLineEdit()
self.fileline.setPlaceholderText('Select hex file...')
self.browsebutton = QPushButton('...')
# Set the appropriate size manually since the "standard" size is too big.
# It seems that buttons get a 10 pixel pad on each side.
browsefw = self.browsebutton.fontMetrics().width(self.browsebutton.text())
if browsefw > 15:
self.browsebutton.setFixedWidth(browsefw + 20)
else:
self.browsebutton.setFixedWidth(35)
self.hashlabel = QLabel("MD5 Sum")
self.hashline = QLineEdit()
self.hashline.setPlaceholderText('No file selected')
self.hashline.setReadOnly(True)
self.startbutton = QPushButton('Start')
self.startbutton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.progress = QProgressBar()
self.progress.setRange(0, 100)
self.progress.setFixedWidth(100)
# so our file dialog remembers where we last were (default to home directory)
self.lasthexdir = os.path.expanduser('~')
# set up external connections
connmgr.addSignal(self.updatestartclicked, 'StartUpdate')
connmgr.addSlot(self.setUpdateProgress, 'UpdateProgressed')
connmgr.addSlot(self.endUpdate, 'UpdateCompleted')
# connect signals to internal slots
self.browsebutton.clicked.connect(self.browseForHexFile)
self.startbutton.clicked.connect(self.startNewUpdate)
# set up our control layout
self.vbox = QVBoxLayout(self)
self.filehbox = QHBoxLayout()
self.starthbox = QHBoxLayout()
self.vbox.setAlignment(Qt.AlignCenter)
self.vbox.addLayout(self.filehbox)
self.filehbox.addWidget(self.fileline)
self.filehbox.addWidget(self.browsebutton)
self.vbox.addLayout(self.starthbox)
self.starthbox.setAlignment(Qt.AlignLeft)
self.starthbox.addWidget(self.startbutton)
self.starthbox.addWidget(self.progress)
self.vbox.addSpacing(10)
self.vbox.addWidget(self.hashlabel)
self.vbox.addWidget(self.hashline)
@QtCore.Slot()
def browseForHexFile(self):
hexpath = QFileDialog.getOpenFileName(self, 'Select hex file', self.lasthexdir,
'Intel hex files (*.hex);;All Files (*)')
if hexpath[0] != '':
self.fileline.setText(hexpath[0])
self.lasthexdir = os.path.dirname(hexpath[0])
h = hashlib.md5()
with open(hexpath[0], 'r') as hexfile:
for line in hexfile:
h.update(line)
self.hashline.setText(h.hexdigest())
@QtCore.Slot()
def startNewUpdate(self):
self.progress.reset()
self.updatestartclicked.emit(self.fileline.text())
@QtCore.Slot(int)
def setUpdateProgress(self, prog):
self.progress.setValue(prog)
@QtCore.Slot(bool)
def endUpdate(self, result):
self.progress.reset()
| gpl-3.0 | -2,336,973,511,080,071,700 | 33.186047 | 93 | 0.66644 | false |
1844144/django-blog-zinnia | zinnia/tests/implementations/settings.py | 1 | 1200 | """Settings for testing zinnia"""
import os
from zinnia.xmlrpc import ZINNIA_XMLRPC_METHODS
SITE_ID = 1
USE_TZ = True
STATIC_URL = '/static/'
SECRET_KEY = 'secret-key'
ROOT_URLCONF = 'zinnia.tests.implementations.urls.default'
LOCALE_PATHS = [os.path.join(os.path.dirname(__file__), 'locale')]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.SHA1PasswordHasher'
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = [
'django.core.context_processors.request',
'zinnia.context_processors.version'
]
TEMPLATE_LOADERS = [
['django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader']
]
]
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.auth',
'django_comments',
'django_xmlrpc',
'mptt',
'tagging',
'south',
'zinnia'
]
ZINNIA_PAGINATION = 3
XMLRPC_METHODS = ZINNIA_XMLRPC_METHODS
| bsd-3-clause | 9,107,452,346,140,057,000 | 20.818182 | 66 | 0.694167 | false |
benkirk/mpi_playground | mpi4py/tests/test_rma.py | 1 | 15531 | from mpi4py import MPI
import mpiunittest as unittest
import arrayimpl
import sys
pypy_lt_53 = (hasattr(sys, 'pypy_version_info') and
sys.pypy_version_info < (5, 3))
def mkzeros(n):
if pypy_lt_53:
return b'\0' * n
return bytearray(n)
def memzero(m):
try:
m[:] = 0
except IndexError: # cffi buffer
m[0:len(m)] = b'\0'*len(m)
class BaseTestRMA(object):
COMM = MPI.COMM_NULL
INFO = MPI.INFO_NULL
def setUp(self):
nbytes = 100*MPI.DOUBLE.size
try:
self.mpi_memory = MPI.Alloc_mem(nbytes)
self.memory = self.mpi_memory
memzero(self.memory)
except MPI.Exception:
import array
self.mpi_memory = None
self.memory = array.array('B',[0]*nbytes)
self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)
def tearDown(self):
self.WIN.Free()
if self.mpi_memory:
MPI.Free_mem(self.mpi_memory)
def testPutGet(self):
typemap = MPI._typedict
group = self.WIN.Get_group()
size = group.Get_size()
group.Free()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
for count in range(10):
for rank in range(size):
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
#
self.WIN.Fence()
self.WIN.Put(sbuf.as_mpi(), rank)
self.WIN.Fence()
self.WIN.Get(rbuf.as_mpi_c(count), rank)
self.WIN.Fence()
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertNotEqual(rbuf[i], -1)
self.assertEqual(rbuf[-1], -1)
#
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
target = sbuf.itemsize
self.WIN.Fence()
self.WIN.Put(sbuf.as_mpi(), rank, target)
self.WIN.Fence()
self.WIN.Get(rbuf.as_mpi_c(count), rank, target)
self.WIN.Fence()
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertNotEqual(rbuf[i], -1)
self.assertEqual(rbuf[-1], -1)
#
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
datatype = typemap[typecode]
target = (sbuf.itemsize, count, datatype)
self.WIN.Fence()
self.WIN.Put(sbuf.as_mpi(), rank, target)
self.WIN.Fence()
self.WIN.Get(rbuf.as_mpi_c(count), rank, target)
self.WIN.Fence()
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertNotEqual(rbuf[i], -1)
self.assertEqual(rbuf[-1], -1)
def testAccumulate(self):
group = self.WIN.Get_group()
size = group.Get_size()
group.Free()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
for count in range(10):
for rank in range(size):
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
self.WIN.Fence()
self.WIN.Accumulate(sbuf.as_mpi(), rank, op=op)
self.WIN.Fence()
self.WIN.Get(rbuf.as_mpi_c(count), rank)
self.WIN.Fence()
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertNotEqual(rbuf[i], -1)
self.assertEqual(rbuf[-1], -1)
@unittest.skipMPI('openmpi(>=1.10,<1.11)')
def testGetAccumulate(self):
group = self.WIN.Get_group()
size = group.Get_size()
rank = group.Get_rank()
group.Free()
self.WIN.Fence()
obuf = MPI.Alloc_mem(1); memzero(obuf)
rbuf = MPI.Alloc_mem(1); memzero(rbuf)
try:
try:
self.WIN.Get_accumulate([obuf, 0, MPI.BYTE], [rbuf, 0, MPI.BYTE], rank)
finally:
MPI.Free_mem(obuf)
MPI.Free_mem(rbuf)
except NotImplementedError:
self.skipTest('mpi-win-get_accumulate')
self.WIN.Fence()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
for count in range(10):
for rank in range(size):
ones = array([1]*count, typecode)
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
gbuf = array(-1, typecode, count+1)
for op in (MPI.SUM, MPI.PROD,
MPI.MAX, MPI.MIN,
MPI.REPLACE, MPI.NO_OP):
self.WIN.Lock(rank)
self.WIN.Put(ones.as_mpi(), rank)
self.WIN.Flush(rank)
self.WIN.Get_accumulate(sbuf.as_mpi(),
rbuf.as_mpi_c(count),
rank, op=op)
self.WIN.Flush(rank)
self.WIN.Get(gbuf.as_mpi_c(count), rank)
self.WIN.Flush(rank)
self.WIN.Unlock(rank)
#
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertEqual(rbuf[i], 1)
self.assertEqual(gbuf[i], op(1, i))
self.assertEqual(rbuf[-1], -1)
self.assertEqual(gbuf[-1], -1)
def testFetchAndOp(self):
group = self.WIN.Get_group()
size = group.Get_size()
rank = group.Get_rank()
group.Free()
self.WIN.Fence()
obuf = MPI.Alloc_mem(1); memzero(obuf)
rbuf = MPI.Alloc_mem(1); memzero(rbuf)
try:
try:
self.WIN.Fetch_and_op([obuf, 1, MPI.BYTE], [rbuf, 1, MPI.BYTE], rank)
finally:
MPI.Free_mem(obuf)
MPI.Free_mem(rbuf)
except NotImplementedError:
self.skipTest('mpi-win-fetch_and_op')
self.WIN.Fence()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
obuf = array(+1, typecode)
rbuf = array(-1, typecode, 2)
for op in (MPI.SUM, MPI.PROD,
MPI.MAX, MPI.MIN,
MPI.REPLACE, MPI.NO_OP):
for rank in range(size):
for disp in range(3):
self.WIN.Lock(rank)
self.WIN.Fetch_and_op(obuf.as_mpi(),
rbuf.as_mpi_c(1),
rank, disp, op=op)
self.WIN.Unlock(rank)
self.assertEqual(rbuf[1], -1)
def testCompareAndSwap(self):
group = self.WIN.Get_group()
size = group.Get_size()
rank = group.Get_rank()
group.Free()
self.WIN.Fence()
obuf = MPI.Alloc_mem(1); memzero(obuf)
cbuf = MPI.Alloc_mem(1); memzero(cbuf)
rbuf = MPI.Alloc_mem(1); memzero(rbuf)
try:
try:
self.WIN.Compare_and_swap([obuf, 1, MPI.BYTE],
[cbuf, 1, MPI.BYTE],
[rbuf, 1, MPI.BYTE],
rank, 0)
finally:
MPI.Free_mem(obuf)
MPI.Free_mem(cbuf)
MPI.Free_mem(rbuf)
except NotImplementedError:
self.skipTest('mpi-win-compare_and_swap')
self.WIN.Fence()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
if typecode in 'fdg': continue
obuf = array(+1, typecode)
cbuf = array( 0, typecode)
rbuf = array(-1, typecode, 2)
for rank in range(size):
for disp in range(3):
self.WIN.Lock(rank)
self.WIN.Compare_and_swap(obuf.as_mpi(),
cbuf.as_mpi(),
rbuf.as_mpi_c(1),
rank, disp)
self.WIN.Unlock(rank)
self.assertEqual(rbuf[1], -1)
def testPutProcNull(self):
self.WIN.Fence()
self.WIN.Put(None, MPI.PROC_NULL, None)
self.WIN.Fence()
def testGetProcNull(self):
self.WIN.Fence()
self.WIN.Get(None, MPI.PROC_NULL, None)
self.WIN.Fence()
def testAccumulateProcNullReplace(self):
self.WIN.Fence()
zeros = mkzeros(8)
self.WIN.Fence()
self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE)
self.WIN.Fence()
self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE)
self.WIN.Fence()
def testAccumulateProcNullSum(self):
self.WIN.Fence()
zeros = mkzeros(8)
self.WIN.Fence()
self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.SUM)
self.WIN.Fence()
self.WIN.Accumulate([None, MPI.INT], MPI.PROC_NULL, None, MPI.SUM)
self.WIN.Fence()
def testGetAccumulateProcNull(self):
obuf = [mkzeros(8), 0, MPI.INT]
rbuf = [mkzeros(8), 0, MPI.INT]
self.WIN.Fence()
try:
self.WIN.Get_accumulate(obuf, rbuf, MPI.PROC_NULL)
except NotImplementedError:
self.skipTest('mpi-win-get_accumulate')
self.WIN.Fence()
##def testFetchAndOpProcNull(self):
## obuf = cbuf = rbuf = None
## self.WIN.Fence()
## try:
## self.WIN.Fetch_and_op(obuf, rbuf, MPI.PROC_NULL, 0)
## except NotImplementedError:
## self.skipTest('mpi-win-fetch_and_op')
## self.WIN.Fence()
##def testCompareAndSwapProcNull(self):
## obuf = cbuf = rbuf = None
## self.WIN.Fence()
## try:
## self.WIN.Compare_and_swap(obuf, cbuf, rbuf, MPI.PROC_NULL, 0)
## except NotImplementedError:
## self.skipTest('mpi-win-compare_and_swap')
## self.WIN.Fence()
def testFence(self):
win = self.WIN
LMODE = [0, MPI.MODE_NOSTORE, MPI.MODE_NOPUT,
MPI.MODE_NOSTORE|MPI.MODE_NOPUT]
GMODE = [0, MPI.MODE_NOPRECEDE, MPI.MODE_NOSUCCEED]
win.Fence()
for lmode in LMODE:
for gmode in GMODE:
assertion = lmode | gmode
win.Fence(assertion)
win.Fence()
@unittest.skipMPI('openmpi(==1.8.1)')
def testFenceAll(self):
win = self.WIN
assertion = 0
modes = [0,
MPI.MODE_NOSTORE,
MPI.MODE_NOPUT,
MPI.MODE_NOPRECEDE,
MPI.MODE_NOSUCCEED]
win.Fence()
for mode in modes:
win.Fence(mode)
assertion |= mode
win.Fence(assertion)
win.Fence()
@unittest.skipMPI('openmpi(==1.8.6)')
def testStartComplete(self):
self.WIN.Start(MPI.GROUP_EMPTY)
self.WIN.Complete()
@unittest.skipMPI('openmpi(==1.8.6)')
def testPostWait(self):
self.WIN.Post(MPI.GROUP_EMPTY)
self.WIN.Wait()
@unittest.skipMPI('openmpi(==1.8.7)')
@unittest.skipMPI('openmpi(==1.8.6)')
def testStartCompletePostWait(self):
win = self.WIN
wingroup = win.Get_group()
size = wingroup.Get_size()
rank = wingroup.Get_rank()
if size < 2: return wingroup.Free()
if rank == 0:
group = wingroup.Excl([0])
win.Start(group)
win.Complete()
win.Post(group)
win.Wait()
group.Free()
else:
group = wingroup.Incl([0])
win.Post(group)
win.Wait()
win.Start(group)
win.Complete()
group.Free()
wingroup.Free()
@unittest.skipMPI('openmpi(==1.8.7)')
@unittest.skipMPI('openmpi(==1.8.6)')
def testStartCompletePostTest(self):
comm = self.COMM
win = self.WIN
wingroup = win.Get_group()
size = wingroup.Get_size()
rank = wingroup.Get_rank()
if size < 2: return wingroup.Free()
if rank == 0:
group = wingroup.Excl([0])
win.Start(group)
comm.Barrier()
win.Complete()
comm.Barrier()
group.Free()
else:
group = wingroup.Incl([0])
win.Post(group)
flag = win.Test()
self.assertFalse(flag)
comm.Barrier()
comm.Barrier()
flag = win.Test()
self.assertTrue(flag)
group.Free()
wingroup.Free()
@unittest.skipMPI('MPI(<3.0)')
def testSync(self):
win = self.WIN
comm = self.COMM
rank = comm.Get_rank()
win.Lock(rank)
win.Sync()
win.Unlock(rank)
comm.Barrier()
@unittest.skipMPI('MPI(<3.0)')
def testFlush(self):
win = self.WIN
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
#
for i in range(size):
win.Lock(i)
win.Flush(i)
win.Unlock(i)
comm.Barrier()
for i in range(size):
if i == rank:
win.Lock_all()
win.Flush_all()
win.Unlock_all()
comm.Barrier()
#
for i in range(size):
win.Lock(i)
win.Flush_local(i)
win.Unlock(i)
comm.Barrier()
for i in range(size):
if i == rank:
win.Lock_all()
win.Flush_local_all()
win.Unlock_all()
comm.Barrier()
class TestRMASelf(BaseTestRMA, unittest.TestCase):
COMM = MPI.COMM_SELF
class TestRMAWorld(BaseTestRMA, unittest.TestCase):
COMM = MPI.COMM_WORLD
SpectrumMPI = MPI.get_vendor()[0] == 'Spectrum MPI'
try:
if SpectrumMPI: raise NotImplementedError
MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except (NotImplementedError, MPI.Exception):
unittest.disable(BaseTestRMA, 'mpi-rma')
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,210,494,767,471,415,000 | 34.86836 | 87 | 0.463203 | false |
dragonrider7225/PythonGames | main.py | 1 | 2439 | #!py -3
from solitaire import *
import sys
def main():
opt = int(input("Which game would you like to play?\n\t0: Quit program\n" +
"\t1: Klondike\n"))
if not opt:
sys.exit(0)
    if opt == 1:
        game = klondike
        args = []
    else:
        print("Unknown option: {}".format(opt))
        sys.exit(1)
game.set_up(*args)
game.show_board()
while True:
if game.get_result() == game.VICTORY:
print("YOU WIN!")
return
if game.get_result() == game.LOSE:
print("YOU LOSE!")
return
m = input("Move: ").split()
if game == klondike:
if m[0][0] == "s":
game.draw()
game.show_board()
elif m[0][0] == "m":
if m[1][0] == "w":
if m[2][0] == "f":
game.m()
elif m[2][0] == "l":
game.m(m[1], int(m[2][1:]))
else:
print_valid_moves(game)
continue
elif m[1][0] == "l":
if m[2][0] == "f":
game.m(int(m[1][1:]), "f")
elif m[2][0] == "l":
if len(m) == 3:
game.m(int(m[1][1:]), int(m[2][1:]))
else:
game.m(int(m[1][1:]), int(m[3]), int(m[2][1:]))
else:
print_valid_moves(game)
elif m[1][0] == "f":
if m[2][0] == "l":
game.m("f", int(m[1][1:]), int(m[2][1:]))
else:
print_valid_moves(game)
else:
print_valid_moves(game)
elif m[0][0] == "q":
sys.exit(0)
else:
print_valid_moves(game)
def print_valid_moves(game):
game.show_board()
print("Please enter a valid move:")
if game == klondike:
print("s[tock]")
print("m[ove] w[aste] f[oundation]")
print("m[ove] w[aste] lN")
print("m[ove] lN f[oundation]")
print("m[ove] lN1 lN2 C")
print("m[ove] fM lN")
print("q[uit]")
print("0 <= N* <= 6, 0 <= M <= 3, C is the number of cards", end=" ")
print("that are below the card to move from one layout", end=" ")
print("pile to another.")
if __name__ == "__main__":
main()
| apache-2.0 | -2,390,675,310,736,939,000 | 31.959459 | 79 | 0.373514 | false |
jaredhasenklein/the-blue-alliance | tbans/models/notifications/event_schedule.py | 1 | 2021 | import calendar
from tbans.models.notifications.notification import Notification
class EventScheduleNotification(Notification):
def __init__(self, event, next_match=None):
self.event = event
self._event_feed = event.key_name
self._district_feed = event.event_district_abbrev
if not next_match:
from helpers.match_helper import MatchHelper
upcoming = MatchHelper.upcomingMatches(event.matches, 1)
self.next_match = upcoming[0] if upcoming and len(upcoming) > 0 else None
else:
self.next_match = next_match
@classmethod
def _type(cls):
from consts.notification_type import NotificationType
return NotificationType.SCHEDULE_UPDATED
@property
def fcm_notification(self):
body = 'The {} match schedule has been updated.'.format(self.event.normalized_name)
if self.next_match and self.next_match.time:
time = self.next_match.time.strftime("%H:%M")
body += ' The next match starts at {}.'.format(time)
from firebase_admin import messaging
return messaging.Notification(
title='{} Schedule Updated'.format(self.event.event_short.upper()),
body=body
)
@property
def platform_config(self):
from tbans.consts.fcm.platform_priority import PlatformPriority
from tbans.models.fcm.platform_config import PlatformConfig
return PlatformConfig(priority=PlatformPriority.HIGH)
@property
def data_payload(self):
payload = {
'event_key': self.event.key_name
}
if self.next_match and self.next_match.time:
payload['first_match_time'] = calendar.timegm(self.next_match.time.utctimetuple())
else:
payload['first_match_time'] = None
return payload
@property
def webhook_message_data(self):
payload = self.data_payload
payload['event_name'] = self.event.name
return payload
| mit | 268,706,569,295,279,700 | 32.131148 | 94 | 0.642751 | false |
SF-Zhou/quite | quite/deferred_function.py | 1 | 3424 | from . import deferred_define
from . import Widget, QWidget
from . import QMainWindow, QDockWidget
from . import QHBoxLayout, SquareLayout
from . import WidgetController
from . import QSize, QSizeF, QPoint
from . import QPicture, QPixmap
from . import QPrinter, QPainter
@deferred_define
def set_central_widget(self: Widget, widget, del_pre_widget=True):
if isinstance(widget, WidgetController):
widget = widget.w
if not isinstance(widget, QWidget):
raise TypeError('Only Support Widget or WidgetController')
widget.setVisible(True) # ensure widget is visible
if hasattr(self, 'center_widget'):
self.layout().removeWidget(self.center_widget)
self.center_widget.setVisible(False) # hide pre widget, and widget can reuse
if del_pre_widget:
self.center_widget.deleteLater()
if isinstance(self, QMainWindow):
self.setCentralWidget(widget)
elif isinstance(self, QDockWidget):
self.setWidget(widget)
elif hasattr(self, 'center_widget'):
self.layout().addWidget(widget)
else:
layout = QHBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(widget)
self.setLayout(layout)
self.center_widget = widget
@deferred_define
def set_square_widget(self: Widget, widget: Widget, spacing=0):
if isinstance(widget, WidgetController):
widget = widget.w
if not isinstance(widget, QWidget):
raise TypeError('Only Support Widget or WidgetController')
layout = SquareLayout()
layout.setSpacing(spacing)
layout.addWidget(widget)
self.setLayout(layout)
self.center_widget = widget
@deferred_define
def set_layout_spacing(self: Widget, spacing):
layout = self.layout()
assert isinstance(layout, SquareLayout)
layout.setSpacing(spacing)
layout.update()
@deferred_define
def export_to_pdf(self: Widget, filename: str, export_size=QSize(1060, 730)):
assert isinstance(export_size, QSize)
w, h = self.size
if w > h:
self.resize(export_size.width(), export_size.height())
else:
self.resize(export_size.height(), export_size.width())
p = QPicture()
painter = QPainter(p)
self.render(painter, QPoint(0, 0))
painter.end()
printer = QPrinter()
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName(filename)
if w > h:
printer.setOrientation(QPrinter.Landscape)
if export_size.width() != 1060 or export_size.height() != 730:
printer.setPageSize(QPrinter.Custom)
printer.setPaperSize(QSizeF(self.size[1] * 0.8 + 20, self.size[0] * 0.8 + 20), QPrinter.Point)
painter = QPainter()
ok = painter.begin(printer)
if ok:
painter.drawPicture(0, 0, p)
ok = painter.end()
self.resize(w, h)
return ok
@deferred_define
def export_to_bitmap(self: Widget, filename: str, export_size=QSize(1060, 730)):
if filename.endswith('pdf'):
return export_to_pdf(self, filename)
assert isinstance(export_size, QSize)
w, h = self.size
if w > h:
self.resize(export_size.width(), export_size.height())
else:
self.resize(export_size.height(), export_size.width())
p = QPixmap(*self.size)
painter = QPainter(p)
self.render(painter, QPoint(0, 0))
painter.end()
ok = p.save(filename)
self.resize(w, h)
return ok
| mit | 8,475,001,357,784,681,000 | 29.571429 | 102 | 0.669977 | false |
strongrandom/awesome-fedora | generate.py | 1 | 6196 | # https://github.com/strongrandom/awesome-fedora
#
# This is a quick-and-dirty tool to convert packages.yml into README.md. It is not pretty. It is not efficient. It is
# nowhere near my best work... but it works.
#
# Use Python 3. Install PyYAML.
#
import operator
import textwrap
import dnf
import yaml
def write_packages(file, dictionary: dict):
if len(dictionary) > 0:
packages = "dnf install "
for wp_key_package_name in sorted(dictionary, key=operator.itemgetter(0)):
packages += wp_key_package_name + " "
file.write("\n```\n")
wp_wrap = textwrap.TextWrapper(break_long_words=False, break_on_hyphens=False, width=70,
initial_indent='', subsequent_indent=' ')
wp_lines = wp_wrap.wrap(packages)
count = 0
for wp_line in wp_lines:
file.write(wp_line)
if len(wp_lines) > 1 and count < len(wp_lines) - 1:
file.write(" \\\n")
else:
file.write("\n")
count += 1
file.write("```\n\n")
file.write("[back to index](#index)\n\n")
def parse_yaml(yaml_data_sorted, file_output):
for key_category, value_dict_packages in yaml_data_sorted:
if "__index" in key_category:
# *__index indicates where to put the index of groups
build_index(file_output, yaml_data_sorted)
elif "__" in key_category:
# __ in the key indicates plain text to output
file_output.write("{}\n\n".format(value_dict_packages))
elif type(value_dict_packages) is dict:
# otherwise, if a dict, process the group
file_output.write("***\n## {}\n".format(key_category))
build_group(file_output, value_dict_packages)
def build_group(file_output, value_dict_packages):
dict_group = dict()
dict_packages_sorted = sorted(value_dict_packages.items(), key=operator.itemgetter(0))
for key_package_name, value_package_attributes in dict_packages_sorted:
if "__" in key_package_name:
file_output.write("\n {}\n\n".format(value_package_attributes))
else:
dnf_result = dnf_query_available.filter(name=key_package_name)
if len(dnf_result) > 0:
# Ignore multiple results, just take the first (dnf.result[0])
build_package_entry(dict_group, dnf_result[0], file_output, key_package_name, value_package_attributes)
else:
print(" Package not found in DNF:", key_package_name)
write_packages(file_output, dict_group)
def build_package_entry(dict_group, dnf_package, file_output, key_package_name, value_package_attributes):
description = dnf_package.description
url = dnf_package.url
# Look up the repository and mark those from external repos
repository = dnf_package.repo.id
if repository == "fedora":
repository = ""
elif repository == "updates":
repository = ""
else:
repository = "{} ".format(repository)
if type(value_package_attributes) is dict:
if type(value_package_attributes.get("description")) is str:
description = value_package_attributes.get("description")
if type(value_package_attributes.get("url")) is str:
url = value_package_attributes.get("url")
if value_package_attributes.get("essential"):
dict_essential[key_package_name] = ""
# Hack in essential package checkmark
repository = "\u2713 " + repository
file_output.write(" * **{}[{}]({}): {}**\n".format(repository, key_package_name, url,
dnf_package.summary))
# Process the description field. Ugly, but works.
description = description.strip()
description = description.replace("\n", " ")
while " " in description:
description = description.replace(' ', ' ')
description = description.replace('`', "'")
description = description.replace('*', "'")
description = description.replace('>', ">")
description = description.replace('<', "<")
# Wrap and write out the description
wrap = textwrap.TextWrapper(width=70, max_lines=10,
initial_indent=' ', subsequent_indent=' ')
lines = wrap.wrap(description)
file_output.write("\n")
for line in lines:
file_output.write(line)
file_output.write("\n")
file_output.write("\n")
# Add to the dictionaries
dict_all[key_package_name] = ""
dict_group[key_package_name] = ""
def build_index(file_output, yaml_data_sorted):
# Hack to build an index
file_output.write("\n")
file_output.write("## Index\n")
for index_category, _ in yaml_data_sorted:
if "_" not in index_category:
index_link = str.lower(index_category)
index_link = index_link.replace(' ', '-')
index_link = index_link.replace('/', '')
index_link = index_link.replace('+', '')
file_output.write("- [{}](#{})\n".format(index_category, index_link))
file_output.write("\n")
if __name__ == '__main__':
print("Loading YAML ...")
stream = open('packages.yml', 'r')
yaml_data = yaml.load(stream)
file_output = open('README.md', 'w')
dict_all = dict()
dict_essential = dict()
with dnf.Base() as dnf_base:
print("Setting up DNF ...")
# DNF boilerplate, see http://dnf.readthedocs.io/en/latest/use_cases.html#id3
dnf_base.read_all_repos()
dnf_base.fill_sack()
dnf_query = dnf_base.sack.query()
dnf_query_available = dnf_query.available()
yaml_data_sorted = sorted(yaml_data.items(), key=operator.itemgetter(0))
print("Parsing ...")
parse_yaml(yaml_data_sorted, file_output)
file_output.write("# Everything #\n")
file_output.write("\nAll {} packages:\n".format(len(dict_all)))
write_packages(file_output, dict_all)
if len(dict_essential) > 0:
file_output.write("# Essential #\n")
write_packages(file_output, dict_essential)
file_output.close()
| mit | 2,793,258,951,590,367,000 | 33.614525 | 119 | 0.596191 | false |
CiscoDevNet/netconf-examples | netconf-102/get_config_csr1000V.py | 1 | 1234 | #!/usr/bin/python
#
# Get configured interfaces using Netconf
#
# [email protected]
#
from ncclient import manager
import sys
import xml.dom.minidom
# the variables below assume the user is requesting access
# to a IOS-XE device running in the DevNet Always On SandBox
# use the IP address or hostname of your IOS-XE device
HOST = 'ios-xe-mgmt.cisco.com'
# use the NETCONF port for your IOS-XE device
PORT = 10000
# use the user credentials for your IOS-XE device
USER = 'root'
PASS = 'C!sc0123'
# XML file to open
FILE = 'get_interfaces.xml'
# create a main() method
def get_configured_interfaces():
"""Main method that retrieves the interfaces from config via NETCONF."""
with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,
hostkey_verify=False, device_params={'name': 'default'},
allow_agent=False, look_for_keys=False) as m:
with open(FILE) as f:
return(m.get_config('running', f.read()))
def main():
"""Simple main method calling our function."""
interfaces = get_configured_interfaces()
print(xml.dom.minidom.parseString(interfaces.xml).toprettyxml())
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 4,805,675,626,811,635,000 | 27.045455 | 81 | 0.679092 | false |
redixin/keyserver-ng | keyserver/key.py | 1 | 1886 | from datetime import datetime
import gpgme
from io import BytesIO
class PublicKey:
def __init__(self, fp):
if isinstance(fp, str):
fp = BytesIO(bytes(fp, "ascii"))
elif isinstance(fp, bytes):
fp = BytesIO(fp)
self.ctx = gpgme.Context()
self.ctx.armor = False
self._import = self.ctx.import_(fp)
self.fpr = self._import.imports[0][0]
self._key = self.ctx.get_key(self.fpr)
def __repr__(self):
return "<PublicKey: %s <%s>>: %s" % (self.name, self.email, self.fpr)
def encrypt(self, data):
data = BytesIO(bytes(data, "ascii"))
ciphertext = BytesIO()
self.ctx.encrypt([self._key], gpgme.ENCRYPT_ALWAYS_TRUST,
data, ciphertext)
ciphertext.seek(0)
return ciphertext.read()
def export(self, fp):
self.ctx.export(self.fpr, fp)
@property
def key(self):
fp = BytesIO()
self.export(fp)
fp.seek(0)
return fp.read()
@property
def expire(self):
expire = 0
for sk in self._key.subkeys:
if sk.expires > expire:
expire = sk.expires
if expire:
return datetime.fromtimestamp(expire)
@property
def ids(self):
return [sk.keyid for sk in self._key.subkeys]
@property
def name(self):
return self._key.uids[0].name
@property
def email(self):
return self._key.uids[0].email
class PrivateKey:
def __init__(self, data):
self.ctx = gpgme.Context()
self.ctx.armor = False
self._import = self.ctx.import_(BytesIO(bytes(data, "ascii")))
def decrypt(self, data):
plaintext = BytesIO()
ciphertext = BytesIO(data)
self.ctx.decrypt(ciphertext, plaintext)
plaintext.seek(0)
return plaintext.read()
| apache-2.0 | 8,411,964,592,434,458,000 | 24.486486 | 77 | 0.560445 | false |
leonardr/sycorax | timeline.py | 1 | 21201 | """Parse a Sycorax script into an annotated multi-author timeline."""
from datetime import datetime, timedelta
import json
import random
import re
import hashlib
import os
import pytz
# 10M: ~10 minutes later
# 4H: ~4 hours later
# 1D: the next day
# R: Reply to previous
# 10A: 10AM on the current day
# 9P: 9PM on the current day
DEFAULT_DELAY = timedelta(hours=4)
REPLY_TO_CODE = "R"
DELAY_CODE = re.compile("([0-9]+)([MHD])")
DELAY_UNITS = dict(M="minutes", H="hours", D="days")
TIME_OF_DAY_CODE = re.compile("([0-9]{1,2})([AP])")
JSON_TIME_FORMAT = "%d %b %Y %H:%M:%S %Z"
def load_config(directory):
filename = os.path.join(directory, "config.json")
if not os.path.exists(filename):
raise Exception("Could not find config.json file in directory %s" % (
directory
))
data = json.loads(open(filename).read().strip())
data['start_date'] = datetime.strptime(data['start_date'], "%Y/%m/%d")
data['chapter_duration_days'] = timedelta(
days=data['chapter_duration_days'])
return data
def load_stream(directory):
config = load_config(directory)
filename = os.path.join(directory, "input.txt")
if not os.path.exists(filename):
raise Exception("Could not find input.txt file in directory %s" % (
directory
))
progress = load_progress(directory)
try:
progress = load_progress(directory)
except Exception, e:
# Nothing has been posted yet.
progress = None
return Stream(open(filename), config=config, progress=progress)
def load_progress(directory):
config = load_config(directory)
filename = os.path.join(directory, "progress.json")
if not os.path.exists(filename):
raise Exception("Could not find progress.json file in directory %s" % (
directory
))
return Progress(open(filename))
class TimezoneAware(object):
def start_of_day(self, datetime):
return datetime.replace(
hour=0, minute=0, second=0, tzinfo=self.timezone)
class Progress(object):
"""The progress made in posting a stream."""
def __init__(self, input_stream):
self.timeline = [json.loads(line.strip()) for line in input_stream]
self.posts = {}
for post in self.timeline:
self.posts[post['internal_id']] = post
class TweetParser(TimezoneAware):
"""Parses a line of script into a tweet."""
def __init__(self, config, progress=None, fuzz_quotient=0.2,
fuzz_minimum_seconds=120):
self.authors = config['authors']
self.timezone = pytz.timezone(config['timezone'])
for author in self.authors:
author['account'] = author['account'].encode("utf8")
author['css_class'] = author['account'].replace(
'-', '').replace('_', '')
author['color'] = author.get('color', 'white').encode("utf8")
author['code'] = author.get('code', '')
fuzz = float(config.get('fuzz', fuzz_quotient))
self.fuzz_quotient = fuzz
self.fuzz_minimum_seconds = int(config.get('fuzz_minimum_seconds', fuzz_minimum_seconds))
self.start_date=config['start_date']
self.config = config
self.progress = progress
self.default_author = None
self.authors_by_code = {}
for author in self.authors:
code = author.get('code', '')
if code == '':
self.default_author = author
self.authors_by_code[code] = author
def parse(self, line, stream_so_far):
is_command = False
author = self.default_author
reply_to = None
delay = None
hour_of_day = None
if stream_so_far.latest_tweet is None:
# This is the first tweet ever. The base timecode is the
# start date.
base_timecode = self.start_of_day(self.start_date)
elif stream_so_far.current_chapter.total_tweets == 0:
# This is the first tweet of the chapter. The base timecode
# is the chapter start date.
base_timecode = stream_so_far.current_chapter.start_date
else:
# There is no base timecode. The timestamp will be calculated
# based on the previous tweet's timestamp.
base_timecode = None
line = line.strip()
command_and_tweet = line.split(" ", 1)
if len(command_and_tweet) > 1:
command, tweet = command_and_tweet
else:
# Single-word tweet.
return Tweet(line, author, base_timecode, self.timezone,
progress=self.progress)
# The "command" may actually be the first word of the tweet.
# Extract commands from it until there's nothing left.
# If there is something left, it's not a command.
for author_code, possible_author in self.authors_by_code.items():
if author_code != "" and author_code in command:
author = possible_author
command = command.replace(author_code, "", 1)
break
is_reply = False
if REPLY_TO_CODE in command:
reply_to = stream_so_far.latest_tweet
command = command.replace(REPLY_TO_CODE, "", 1)
is_reply = True
match = DELAY_CODE.match(command)
if match is not None:
number, unit = match.groups()
subcommand = "".join(match.groups())
command = command.replace(subcommand, "")
kwargs = { DELAY_UNITS[unit]: int(number) }
delay = timedelta(**kwargs)
match = TIME_OF_DAY_CODE.match(command)
if match is not None:
hour, am = match.groups()
subcommand = "".join(match.groups())
command = command.replace(subcommand, "")
hour = int(hour)
if am == "A" and hour == 12:
hour = 0
if am == "P" and hour != 12:
hour += 12
if hour > 23:
raise ValueError("Bad time of day %s in %s" % (
subcommand, line))
hour_of_day = hour
if command == "":
# The first word has been entirely processed as
# commands. The rest of the line is the actual content.
line = tweet
else:
# The first word was not a command.
author = self.default_author
reply_to = None
delay = None
is_reply = False
if is_reply and stream_so_far.latest_tweet is None:
raise ValueError(
"The first tweet in the script cannot be a reply.")
if delay is None and hour_of_day is None:
if len(stream_so_far.current_day.tweets) == 0 and stream_so_far.current_chapter.total_tweets > 0:
# This is the first tweet of an in-story day, and no
# special date instructions were given, so publish it at
# the start of the next real-world day.
base_timecode = self.start_of_day(base_timecode) + timedelta(
days=1)
elif stream_so_far.current_chapter.total_tweets == 0:
delay = timedelta(minutes=0)
if len(line) > 140:
print '[WARNING] %d characters in "%s"' % (len(line), line)
return Tweet(line, author, base_timecode, self.timezone, delay,
hour_of_day, reply_to, self.progress)
class Chapter:
def __init__(self, name, start_date):
self.name = name
self.days = []
self.start_date = start_date
@property
def in_story_timeline_html(self):
return "\n".join(
["<h2>%s</h2>\n" % self.name] +
#["<p>%s days, %s tweets</p>\n" % (
# len(self.days), self.total_tweets)] +
[day.in_story_timeline_html for day in self.days])
@property
def real_world_timeline_html(self):
chapter_start_date = self.start_date.strftime(Tweet.REAL_WORLD_TIMELINE_DATE_FORMAT)
if chapter_start_date != self.real_days[0].date:
print '[WARNING] Chapter "%s" starts on %s, but its first tweet happens on %s' % (
self.name, chapter_start_date, self.real_days[0].date)
return "\n".join(
["<h2>%s</h2>\n" % self.name] +
[day.real_world_timeline_html for day in self.real_days])
@property
def total_tweets(self):
return sum(len(x.tweets) for x in self.days)
@property
def all_tweets(self):
for d in self.days:
for t in d.tweets:
yield t
@property
def real_days(self):
"""A list of Day objects corresponding to real-world days for this chapter."""
days = []
current_date = None
current_day = None
for story_day in self.days:
for tweet in story_day.tweets:
if tweet.timestamp_date_str != current_date:
current_date = tweet.timestamp_date_str
current_day = Day(current_date)
days.append(current_day)
current_day.tweets.append(tweet)
return days
class Day:
"""A day's worth of tweets--either an in-story day or a real-world day."""
def __init__(self, date):
self.date = date
self.tweets = []
@property
def in_story_timeline_html(self):
if len(self.tweets) == 0:
return ""
return "\n".join(
["<h3>%s</h3>" % self.date, "<ul>"] +
[tweet.in_story_timeline_html for tweet in self.tweets] + ["</ul>"])
@property
def real_world_timeline_html(self):
if len(self.tweets) == 0:
return ""
return "\n".join(
["<h3>%s</h3>" % self.date, "<ul>"] +
[tweet.real_world_timeline_html for tweet in self.tweets] + ["</ul>"])
class Tweet(TimezoneAware):
REAL_WORLD_TIMELINE_TIME_FORMAT = "%H:%M"
REAL_WORLD_TIMELINE_DATE_FORMAT = "%a %d %b"
def __init__(self, text, author, base_timecode, timezone, delay=None,
hour_of_day=None, in_reply_to=None, progress=None):
self.text = text
self.author = author
self.timezone = timezone
self.in_reply_to = in_reply_to
self.digest = hashlib.md5(self.text).hexdigest()
self.delay = delay
self.hour_of_day = hour_of_day
if self.delay is None and self.hour_of_day is None:
self.delay = DEFAULT_DELAY
self.base_timecode = base_timecode
# In general, timestamps are calculated in a second pass.
self.timestamp = None
# However, if this tweet has already been posted, we know its
# timestamp already.
if progress is not None:
as_posted = progress.posts.get(self.digest)
if as_posted is not None:
self.timestamp = datetime.strptime(
as_posted['planned_timestamp'], JSON_TIME_FORMAT).replace(
tzinfo=pytz.timezone("UTC"))
if (self.hour_of_day is not None and self.delay is not None
and self.delay < timedelta(days=1)):
raise ValueError(
'"%s" defines both a delay and an hour of day, but the delay '
'is less than one day.' % text)
def calculate_timestamp(self, fuzz_quotient, fuzz_minimum_seconds,
previous_tweet):
timestamp = self.base_timecode or previous_tweet.timestamp
if self.timestamp is not None:
# This tweet already has a timestamp, possibly because
# it's already been posted. Leave it alone.
return self.timestamp
# If the delay after the last tweet is one day or more, apply
# it before setting the time of day.
one_day = timedelta(days=1)
if self.delay is not None and self.delay >= one_day:
timestamp += self.delay
timestamp = self.start_of_day(timestamp)
# If a time of day is given, set it now.
if self.hour_of_day is not None:
if timestamp.hour > self.hour_of_day:
# Bump to the next real-world day.
timestamp = timestamp + timedelta(days=1)
timestamp = timestamp.replace(
hour=self.hour_of_day, minute=0, second=0)
# If the delay is less than one day, apply it now.
if self.delay is not None and self.delay < one_day:
timestamp += self.delay
# Now we have a precise timestamp. But posting one tweet
# exactly 30 minutes after another one will look fake. We need
# to fudge the timestamp a little.
if self.hour_of_day is not None:
# We know which hour the tweet should go out. Pick
# sometime in the first 45 minutes of that hour, to
# minimize the chances of collisions with future tweets.
actual_delta = timedelta(seconds=random.randint(0, 45*60))
timestamp = timestamp + actual_delta
elif self.delay is not None:
# We know approximately how long after the previous tweet
# this tweet should go out. Pick sometime
delay_seconds = self.delay.seconds
maximum_variation = max(
delay_seconds * fuzz_quotient, fuzz_minimum_seconds)
actual_variation = random.randint(-maximum_variation, maximum_variation)
actual_delta = timedelta(seconds=actual_variation)
if random.randint(0,1) == 1:
timestamp = timestamp + actual_delta
else:
timestamp = timestamp - actual_delta
else:
raise ValueError(
'Tweet "%s" has neither hour-of-day nor delay since previous '
'tweet. Cannot calculate timestamp.' % self.text)
return timestamp
@property
def json(self):
if self.in_reply_to is None:
in_reply_to = None
else:
in_reply_to = self.in_reply_to.digest
d = dict(internal_id=self.digest, text=self.text,
author=self.author['account'],
in_reply_to=in_reply_to, timestamp=self.timestamp_for_json)
return json.dumps(d)
def li(self, text):
a = []
if self.in_reply_to is not None:
a.append("<ul>")
a.append('<li class="%s">%s</li>' % (self.author['css_class'], text))
if self.in_reply_to is not None:
a.append("</ul>")
return "\n".join(a)
@property
def in_story_timeline_html(self):
return self.li(self.text)
@property
def timestamp_str(self):
return self.timestamp.strftime(self.REAL_WORLD_TIMELINE_TIME_FORMAT)
@property
def timestamp_for_json(self):
return self.timestamp.astimezone(pytz.timezone("UTC")).strftime(
JSON_TIME_FORMAT)
@property
def timestamp_date_str(self):
return self.timestamp.strftime(self.REAL_WORLD_TIMELINE_DATE_FORMAT)
@property
def real_world_timeline_html(self):
text = self.timestamp_str + " " + self.text
return self.li(text)
class Stream:
def __init__(self, lines, tweet_parser=None, config=None, progress=None):
if tweet_parser is None:
if config is None:
raise ValueError(
"You tried to create a stream without providing a "
"tweet parser or a configuration for one.")
tweet_parser = TweetParser(config=config, progress=progress)
self.current_chapter = None
self.current_day = None
self.chapters = []
self.tweet_parser = tweet_parser
self.latest_tweet = None
for line in lines:
line = line.strip()
if len(line) == 0:
continue
if line[:3] == "== ":
self.end_chapter()
self.begin_chapter(line[3:])
elif line[:3] == "-- ":
self.end_day()
self.begin_day(line[3:])
else:
self.add_tweet(line)
self.end_chapter()
self.add_fuzz()
self.chapter_start_sanity_check()
def html_page(self, real_time=False):
START = '''<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
'''
l = [START]
l.append('<style type="text/css">')
for author in self.tweet_parser.authors:
l.append(".%s { background-color: %s }" % (
author['css_class'], author['color']))
l.append('</style></head><body>')
l.append("<p>Author guide:</p>")
l.append("<ul>")
for author in self.tweet_parser.authors:
l.append('<li class="%s">%s</a>' % (author['css_class'], author['account']))
l.append("</ul>")
if real_time:
l.append(self.real_world_timeline_html)
else:
l.append(self.in_story_timeline_html)
l.append("</body></html")
return "\n".join(l)
@property
def in_story_timeline_html(self):
return "\n\n".join(chapter.in_story_timeline_html for chapter in self.chapters)
@property
def real_world_timeline_html(self):
return "\n\n".join(chapter.real_world_timeline_html for chapter in self.chapters)
@property
def tweets(self):
for chapter in self.chapters:
for day in chapter.days:
for tweet in day.tweets:
yield tweet
def add_tweet(self, line):
if self.current_chapter is None:
self.begin_chapter("")
if self.current_day is None:
self.begin_day("")
line = line.strip()
tweet = self.tweet_parser.parse(line, self)
self.current_day.tweets.append(tweet)
self.latest_tweet = tweet
return tweet
def end_chapter(self):
if self.current_chapter is None:
# No current chapter.
return
self.end_day()
self.current_chapter = None
def begin_chapter(self, chapter_name):
if len(self.chapters) == 0:
start_date = self.tweet_parser.start_of_day(
self.tweet_parser.config['start_date'])
else:
previous_chapter = self.chapters[-1]
duration = self.tweet_parser.config['chapter_duration_days']
start_date = previous_chapter.start_date + duration
self.current_chapter = Chapter(chapter_name, start_date)
self.chapters.append(self.current_chapter)
def end_day(self):
if self.current_day is None:
return
self.current_day = None
def begin_day(self, date):
self.current_day = Day(date)
self.current_chapter.days.append(self.current_day)
def add_fuzz(self):
previous_tweet = None
for tweet in self.tweets:
progress = self.tweet_parser.progress
if (progress is not None
and progress.posts.get(tweet.digest) is not None):
# This tweet has already been posted. Don't mess with it.
previous_tweet = tweet
continue
success = False
for i in range(0, 10):
tweet.timestamp = tweet.calculate_timestamp(
self.tweet_parser.fuzz_quotient,
self.tweet_parser.fuzz_minimum_seconds,
previous_tweet)
if (previous_tweet is None
or previous_tweet.timestamp < tweet.timestamp):
# This timestamp is fine. Stop trying to calculate it.
success = True
break
# If we didn't break, the timestamp we calculated came
# before previous tweet's timestamp, which is a
# problem. Restart the loop and calculate a different
# timestamp.
if not success:
# We tried to calculate the timestamp ten times with
# no success. Raise an error.
raise ValueError('Calculated timestamp for "%s" is %s, which comes before calculated timestamp for the previous tweet "%s" (%s). Trying again may help.' % (
tweet.text, tweet.timestamp_str, previous_tweet.text, previous_tweet.timestamp_str))
previous_tweet = tweet
def chapter_start_sanity_check(self):
previous_chapter = self.chapters[0]
for chapter in self.chapters[1:]:
tweets = list(previous_chapter.all_tweets)
if len(tweets) > 0:
previous_chapter_last_tweet = tweets[-1]
if previous_chapter_last_tweet.timestamp > chapter.start_date:
print '[WARNING] Last tweet in chapter "%s" overlaps the start of chapter "%s"' % (
previous_chapter.name, chapter.name)
previous_chapter = chapter
@property
def json(self):
return "\n".join(tweet.json for tweet in self.tweets)
| bsd-2-clause | -76,825,165,262,542,300 | 35.61658 | 172 | 0.566058 | false |
maxis1314/pyutils | web/views/user.py | 1 | 1484 | # coding: utf-8
from flask import Flask,request,session,g,redirect,url_for,Blueprint
from flask import abort,render_template,flash
from helpers import getAvatar
import config
#from .base import BaseHandler
import base
config = config.rec()
user = Blueprint('user', __name__)
import pika
#class LoginHandler(BaseHandler):
@user.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
if base.isAdmin():
return redirect("/")
else:
return render_template("login.html",getAvatar=getAvatar)
username = request.form['username']
password = request.form['password']
#connection = pika.BlockingConnection(pika.ConnectionParameters(
# host='localhost'))
#channel = connection.channel()
#channel.queue_declare(queue='hello')
#channel.basic_publish(exchange='',
# routing_key='hello',
# body=u'u:'+username+' p:'+password)
#print(" [x] Sent 'RABBITQUEUE'")
#connection.close()
if base.userAuth(username, password):
flash('You were successfully logged in')
base.currentUserSet(username)
return redirect("/posts/")
else:
flash('User name or password error','error')
return redirect("/user/login")
#class LogoutHandler(BaseHandler):
@user.route('/logout')
def logout():
session.pop('user',None)
flash('You were successfully logged out')
return redirect('/user/login')
| apache-2.0 | -7,283,646,547,762,287,000 | 29.285714 | 69 | 0.644879 | false |
respawner/peering-manager | peering/migrations/0050_auto_20190806_2159.py | 1 | 3436 | # Generated by Django 2.2.4 on 2019-08-06 19:59
import taggit.managers
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("taggit", "0003_taggeditem_add_unique_index"),
("peering", "0049_auto_20190731_1946"),
]
operations = [
migrations.AddField(
model_name="autonomoussystem",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="bgpgroup",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="community",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="directpeeringsession",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="internetexchange",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="internetexchangepeeringsession",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="router",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="routingpolicy",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="template",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
]
| apache-2.0 | 1,025,210,879,864,040,000 | 31.72381 | 60 | 0.489814 | false |
nlsynth/iroha | examples/shared_reg_options.py | 1 | 1324 | import sys
sys.path.append('../py')
from iroha import *
from iroha.iroha import *
d = IDesign()
mod = IModule(d, "mod")
tab1 = ITable(mod)
sreg = design_tool.CreateSharedReg(tab1, "o", 32)
wtab = ITable(mod)
w = design_tool.CreateSharedRegWriter(wtab, sreg)
wst1 = IState(wtab)
wst2 = IState(wtab)
wst3 = IState(wtab)
wtab.initialSt = wst1
wtab.states.append(wst1)
wtab.states.append(wst2)
wtab.states.append(wst3)
design_tool.AddNextState(wst1, wst2)
design_tool.AddNextState(wst2, wst3)
winsn = IInsn(w)
rc = design_tool.AllocConstNum(wtab, False, 32, 123)
winsn.inputs.append(rc)
winsn.operand = "notify"
wst1.insns.append(winsn)
spinsn = IInsn(w)
spinsn.inputs.append(rc)
spinsn.operand = "put_mailbox"
wst2.insns.append(spinsn)
rtab = ITable(mod)
r = design_tool.CreateSharedRegReader(rtab, sreg)
rst1 = IState(rtab)
rst2 = IState(rtab)
rst3 = IState(rtab)
rtab.initialSt = rst1
rtab.states.append(rst1)
rtab.states.append(rst2)
rtab.states.append(rst3)
design_tool.AddNextState(rst1, rst2)
design_tool.AddNextState(rst2, rst3)
rinsn = IInsn(r)
reg = IRegister(rtab, "r_local")
rinsn.outputs.append(reg)
rinsn.operand = "wait_notify"
rst1.insns.append(rinsn)
sginsn = IInsn(r)
sginsn.inputs.append(rc)
sginsn.operand = "get_mailbox"
rst2.insns.append(sginsn)
design_tool.ValidateIds(d)
DesignWriter(d).Write()
| bsd-3-clause | -322,881,621,275,133,300 | 21.440678 | 52 | 0.746979 | false |
lquirosd/TFM | ILA/code/trainGMM.py | 1 | 5034 | from __future__ import division
import sys, argparse #--- To handle console arguments
import numpy as np #--- To handle math processing
import scipy.ndimage as ndi #--- To handle image processing
from scipy import misc
import glob, os #--- To handle OS callbacks
import utils
from sklearn import mixture
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
try:
import cPickle as pickle
except:
import pickle
def main():
"""
#---------------------------------------------------------------------------#
#--- main ---#
#---------------------------------------------------------------------------#
Description:
main module
Inputs:
#--- To be updated
Outputs:
#--- To be updated
Author:
Quiros Diaz, Lorenzo
Date:
Jun/20/2016
#------------------------------------------------------------------------------#
"""
#--- processing arguments
parser = argparse.ArgumentParser(description='K-NN classifier')
parser.add_argument('-trDir', required=True, action="store", help="Pointer to Training images folder")
parser.add_argument('-o', '--out', required=True, default=".", action="store", help="Folder to save Out files")
parser.add_argument('-nU', '--nUpper', type=int, default=2, action="store", help="Number of Mixtures for Upper Model [Default=2]")
parser.add_argument('-nB', '--nBottom', type=int, default=3, action="store", help="Number of Mixtures for Bottom Model [Default=3]")
parser.add_argument('-s', '--statistics', action="store_true", help="Print some statistics about script execution")
parser.add_argument('-p', '--plot', action="store_true", help="Show plot on window")
parser.add_argument('--debug', action="store_true", help="Run script on Debugging mode")
args = parser.parse_args()
if (args.debug): print args
if (args.statistics): init = time.clock()
#--- Validate arguments
if (not os.path.isdir(args.trDir)):
print "Folder: %s does not exists\n" %args.trDir
parser.print_help()
sys.exit(2)
if (not os.path.isdir(args.out)):
print "Folder: %s does not exists\n" %args.out
parser.print_help()
sys.exit(2)
#--- Read images
allImgs = glob.glob(args.trDir + "/*.jpg")
nImgs = len(allImgs)
if nImgs <= 0:
print "Folder: %s contains no images\n" %args.trDir
parser.print_help()
sys.exit(2)
if (args.statistics): GPinit = time.clock()
#--- keep all image data, just to check memory usage
#--- TODO: remove unnecessary data on each iteration
imgData = np.empty(nImgs, dtype=object)
#--- Array of Upper corners
U = np.zeros((nImgs, 2), dtype=np.int)
#--- Array of Bottom corners
B = np.zeros((nImgs, 2), dtype=np.int)
#--- get U & B corners from all TR dataSet
for i, file in enumerate(allImgs):
print "Working on {0:}".format(file)
imgData[i] = utils.imgPage(file)
#imgData[i].readImage()
imgData[i].parseXML()
U[i] = imgData[i].getUpperPoints()
B[i] = imgData[i].getBottomPoints()
if (args.statistics): print 'Getting Data Points: {0:.5f} seconds'.format(time.clock() - GPinit)
if (args.statistics): TGinit = time.clock()
#--- Train GMM Models
#--- Upper GMM
uGMM = mixture.GMM(n_components = args.nUpper)
uGMM.fit(U)
#--- Bottom GMM
bGMM = mixture.GMM(n_components = args.nBottom, covariance_type='diag')
bGMM.fit(B)
GMM_models = {'Upper': uGMM, 'Bottom': bGMM}
#--- Save Models to file
#--- Out File Name
outFile = args.out + 'GMM_tr' + str(nImgs) + '_u' + str(args.nUpper) + '_b' + str(args.nBottom)
fh = open(outFile + '.model', 'w')
pickle.dump(GMM_models, fh)
fh.close()
if (args.statistics): print 'Training GMM: {0:.5f} seconds'.format(time.clock() - TGinit)
#--- Plot Mixtures and Data
m=9
imgData[m].readImage(full=True)
fig, axs = plt.subplots(1,1)
axs.scatter(U[:, 0], U[:, 1], .8, color='red')
axs.scatter(B[:, 0], B[:, 1], .8, color='blue')
x = np.linspace(0, imgData[m].imgShape[1])
y = np.linspace(0, imgData[m].imgShape[0])
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
uZ = -uGMM.score_samples(XX)[0]
uZ = uZ.reshape(X.shape)
bZ = -bGMM.score_samples(XX)[0]
bZ = bZ.reshape(X.shape)
CSu = axs.contour(X, Y, uZ, norm=LogNorm(vmin=np.min(uZ), vmax=np.max(uZ)),
levels=np.logspace(0, 3, 20))
CSb = axs.contour(X, Y, bZ, norm=LogNorm(vmin=np.min(bZ), vmax=np.max(bZ)),
levels=np.logspace(0, 3, 20))
#axs.clabel(CS, inline=1, fontsize=10)
CB = plt.colorbar(CSu, ax=axs, extend='both')
axs.imshow(imgData[m].img, cmap='gray')
plt.axis('off')
fig.savefig(outFile + '.png', bbox_inches='tight')
if (args.statistics): print 'Total Time: {0:.5f} seconds'.format(time.clock() - init)
if (args.plot): plt.show()
if __name__ == '__main__':
main()
| apache-2.0 | -933,522,224,188,337,200 | 36.288889 | 135 | 0.591577 | false |
offlinehacker/flumotion | flumotion/test/test_wizard_save.py | 1 | 43842 | # -*- Mode: Python; test-case-name: flumotion.test.test_wizard_models -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
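"""Tests for the XML configuration produced when saving the output of
the configuration assistant (wizard)."""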
import unittest
from kiwi.python import Settable
from flumotion.admin.assistant.configurationwriter import ConfigurationWriter
from flumotion.admin.assistant.models import Component, Plug, Porter, \
AudioProducer, VideoProducer, AudioEncoder, VideoEncoder, HTTPServer
from flumotion.admin.assistant.save import AssistantSaver
from flumotion.common import testsuite
from flumotion.common.xmlwriter import XMLWriter
from flumotion.configure import configure
from flumotion.component.producers.firewire.wizard_gtk import FireWireProducer
from flumotion.component.consumers.httpstreamer.wizard_gtk import HTTPStreamer
from flumotion.component.encoders.vorbis.wizard_gtk import VorbisAudioEncoder
from flumotion.component.encoders.theora.wizard_gtk import TheoraVideoEncoder
from flumotion.component.producers.videotest.wizard_gtk import \
TestVideoProducer
from flumotion.component.producers.audiotest.wizard_gtk import \
TestAudioProducer
from flumotion.admin.gtk.overlaystep import Overlay
class TestXMLWriter(testsuite.TestCase):
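    """Tests for the XML serialization done by ConfigurationWriter."""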
def testEmpty(self):
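        """A writer with no components produces a bare <planet> document."""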
writer = ConfigurationWriter('', [], [])
testsuite.diffStrings(
XMLWriter.encoding + \
("<planet>\n"
"</planet>\n"),
writer.getXML())
def testFlowComponent(self):
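        """A component placed in a flow is written inside a named <flow>
        node with its type, project, worker and version attributes."""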
c = Component()
c.name = 'name'
c.componentType = 'streamer'
c.worker = 'worker'
writer = ConfigurationWriter('flow', [c], [])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <flow name="flow">\n'
' <component name="name"\n'
' type="streamer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
def testAtmosphereComponent(self):
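        """A component placed in the atmosphere is written inside
        <atmosphere> together with its properties."""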
c = Component()
c.name = 'name'
c.componentType = 'streamer'
c.worker = 'worker'
c.properties.foo = 'bar'
writer = ConfigurationWriter('', [], [c])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="name"\n'
' type="streamer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="foo">bar</property>\n'
' </component>\n'
' </atmosphere>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
def testComponentWithPlug(self):
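        """Plugs attached to a component are serialized as a <plugs>
        section carrying the plug type and its properties."""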
c = Component()
c.name = 'name'
c.componentType = 'streamer'
c.worker = 'worker'
plug = Plug()
plug.plugType = 'plug-type'
plug.properties.foo = 'bar'
c.plugs.append(plug)
writer = ConfigurationWriter('flow', [c], [])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <flow name="flow">\n'
' <component name="name"\n'
' type="streamer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <plugs>\n'
' <plug type="plug-type">\n'
' \n'
' <property name="foo">bar</property>\n'
' </plug>\n'
' </plugs>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
def testComponentWithFeeders(self):
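        """Linking two components produces an <eater> entry listing the
        <feed> that the eating component consumes."""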
c1 = Component()
c1.name = 'name'
c1.componentType = 'first'
c1.worker = 'worker'
c2 = Component()
c2.name = 'name'
c2.componentType = 'second'
c2.worker = 'worker'
c2.link(c1)
writer = ConfigurationWriter('flow', [c1, c2], [])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <flow name="flow">\n'
' <component name="name"\n'
' type="first"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>name</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="name"\n'
' type="second"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
class TestWizardSave(testsuite.TestCase):
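    """End-to-end tests for AssistantSaver.

    Each test assembles component models the way the configuration
    assistant does, saves them, and diffs the generated XML against a
    known-good fixture.
    """

    # Helpers building the component models shared by the tests below.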
def _createAudioProducer(self, componentType='audio-producer',
worker='audio-producer-worker'):
audioProducer = AudioProducer()
audioProducer.componentType = componentType
audioProducer.worker = worker
return audioProducer
def _createVideoProducer(self, componentType='video-producer',
worker='video-producer-worker'):
videoProducer = VideoProducer()
videoProducer.componentType = componentType
videoProducer.worker = worker
videoProducer.properties.width = 640
videoProducer.properties.height = 480
return videoProducer
def _createVideoOverlay(self, videoProducer):
overlay = Overlay(videoProducer)
overlay.worker = 'overlay-worker'
return overlay
def _createAudioEncoder(self):
audioEncoder = AudioEncoder()
audioEncoder.componentType = 'audio-encoder'
audioEncoder.worker = 'audio-encoder-worker'
return audioEncoder
def _createVideoEncoder(self):
videoEncoder = VideoEncoder()
videoEncoder.componentType = 'video-encoder'
videoEncoder.worker = 'video-encoder-worker'
return videoEncoder
def _createPorter(self):
return Porter('porter-worker',
port=8080,
username='username',
password='password',
socketPath='flu-XXXX.socket')
def _createHTTPStreamer(self):
streamer = HTTPStreamer()
streamer.worker = 'streamer-worker'
return streamer
def _createFirewireProducer(self):
producer = FireWireProducer()
producer.worker = 'firewire-video-producer-worker'
producer.properties.width = 640
producer.properties.height = 480
return producer
def testDefaultStream(self):
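        """A complete audio+video flow: producers, overlay, encoders and
        muxer, plus a porter, a slave HTTP streamer and an HTTP server
        in the atmosphere."""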
save = AssistantSaver()
save.setFlowName('flow')
save.setAudioProducer(self._createAudioProducer())
videoProducer = self._createVideoProducer()
save.setVideoProducer(videoProducer)
save.setVideoOverlay(self._createVideoOverlay(videoProducer))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
server = HTTPServer('server-worker', '/mount/')
save.addServerConsumer(server, 'audio-video')
save.setUseCCLicense(True)
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-audio-video"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount/</property>\n'
' </component>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="audio-producer"\n'
' project="flumotion"\n'
' worker="audio-producer-worker"\n'
' version="%(version)s">\n'
' </component>\n'
' <component name="producer-video"\n'
' type="video-producer"\n'
' project="flumotion"\n'
' worker="video-producer-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="height">480</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-video</feed>\n'
' </eater>\n'
' \n'
' <property name="cc-logo">True</property>\n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testMultiFeedProducer(self):
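        """A single producer component supplying both audio and video is
        eaten by both encoders, with no overlay involved."""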
save = AssistantSaver()
save.setFlowName('flow')
save.setAudioProducer(self._createAudioProducer(
worker='both-producer-worker',
componentType='both-producer'))
save.setVideoProducer(self._createVideoProducer(
componentType='both-producer',
worker='both-producer-worker'))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio-video"\n'
' type="both-producer"\n'
' project="flumotion"\n'
' worker="both-producer-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="height">480</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testOggStream(self):
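        """The test producers encoded with Vorbis and Theora and muxed
        into Ogg; checks that the encoders' default properties end up
        in the XML."""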
save = AssistantSaver()
save.setFlowName('flow')
audioProducer = TestAudioProducer()
audioProducer.worker = 'worker'
save.setAudioProducer(audioProducer)
videoProducer = TestVideoProducer()
videoProducer.worker = 'worker'
videoProducer.properties.width = 320
videoProducer.properties.height = 240
save.setVideoProducer(videoProducer)
save.setVideoOverlay(self._createVideoOverlay(videoProducer))
audioEncoder = VorbisAudioEncoder()
audioEncoder.worker = 'worker'
save.setAudioEncoder(audioEncoder)
videoEncoder = TheoraVideoEncoder()
videoEncoder.worker = 'worker'
save.setVideoEncoder(videoEncoder)
save.setMuxer('ogg-muxer', 'muxer-worker')
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="audiotest-producer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="samplerate">44100</property>\n'
' </component>\n'
' <component name="producer-video"\n'
' type="videotest-producer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="height">240</property>\n'
' <property name="pattern">0</property>\n'
' <property name="width">320</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-video</feed>\n'
' </eater>\n'
' \n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' <property name="xiph-logo">True</property>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="theora-encoder"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' \n'
' <property name="bitrate">400000</property>\n'
' <property name="keyframe-maxdistance">50</property>\n'
' <property name="speed">3</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="vorbis-encoder"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio</feed>\n'
' </eater>\n'
' \n'
' <property name="bitrate">64000</property>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="ogg-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testAudioOnlyStream(self):
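        """An audio-only flow: the muxer eats just the audio encoder,
        and attaching a video overlay raises ValueError."""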
save = AssistantSaver()
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
save.setFlowName('flow')
audioProducer = TestAudioProducer()
audioProducer.worker = 'worker'
save.setAudioProducer(audioProducer)
audioEncoder = VorbisAudioEncoder()
audioEncoder.worker = 'worker'
save.setAudioEncoder(audioEncoder)
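        # No video producer has been set on this audio-only flow, so
        # attaching a video overlay must fail; a video encoder model
        # merely stands in as the overlay's source argument.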
        videoEncoder = self._createVideoEncoder()
        self.assertRaises(ValueError, save.setVideoOverlay,
                          self._createVideoOverlay(videoEncoder))
save.setMuxer('ogg-muxer', 'muxer')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="audiotest-producer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="samplerate">44100</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="vorbis-encoder"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio</feed>\n'
' </eater>\n'
' \n'
' <property name="bitrate">64000</property>\n'
' </component>\n'
' <component name="muxer-audio"\n'
' type="ogg-muxer"\n'
' project="flumotion"\n'
' worker="muxer"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testFirewireStreamer(self):
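        """A single Firewire producer feeds both chains through its
        typed feeds (producer-audio-video:audio and :video)."""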
save = AssistantSaver()
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
save.setFlowName('flow')
producer = self._createFirewireProducer()
save.setAudioProducer(producer)
save.setVideoProducer(producer)
save.setVideoOverlay(self._createVideoOverlay(producer))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
server = HTTPServer('server-worker', '/mount/')
save.addServerConsumer(server, 'audio-video')
save.setUseCCLicense(True)
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-audio-video"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount/</property>\n'
' </component>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio-video"\n'
' type="firewire-producer"\n'
' project="flumotion"\n'
' worker="firewire-video-producer-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="decoder">ffdec_dvvideo</property>\n'
' <property name="deinterlace-method">ffmpeg</property>\n'
' <property name="deinterlace-mode">auto</property>\n'
' <property name="framerate">25/2</property>\n'
' <property name="height">480</property>\n'
' <property name="is-square">True</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video:video</feed>\n'
' </eater>\n'
' \n'
' <property name="cc-logo">True</property>\n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video:audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testFirewireStreamerDifferentWorkers(self):
save = AssistantSaver()
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
save.setFlowName('flow')
audioProducer = self._createFirewireProducer()
audioProducer.worker = 'audio-worker'
save.setAudioProducer(audioProducer)
videoProducer = self._createFirewireProducer()
videoProducer.worker = 'video-worker'
save.setVideoProducer(videoProducer)
save.setVideoOverlay(self._createVideoOverlay(videoProducer))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
streamer = self._createHTTPStreamer()
streamer.has_bandwidth_limit = True
streamer.bandwidth_limit = 123
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
server = HTTPServer('server-worker', '/mount/')
save.addServerConsumer(server, 'audio-video')
save.setUseCCLicense(True)
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-audio-video"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount/</property>\n'
' </component>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="firewire-producer"\n'
' project="flumotion"\n'
' worker="audio-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="decoder">ffdec_dvvideo</property>\n'
' <property name="deinterlace-method">ffmpeg</property>\n'
' <property name="deinterlace-mode">auto</property>\n'
' <property name="framerate">25/2</property>\n'
' <property name="height">480</property>\n'
' <property name="is-square">True</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="producer-video"\n'
' type="firewire-producer"\n'
' project="flumotion"\n'
' worker="video-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="decoder">ffdec_dvvideo</property>\n'
' <property name="deinterlace-method">ffmpeg</property>\n'
' <property name="deinterlace-mode">auto</property>\n'
' <property name="framerate">25/2</property>\n'
' <property name="height">480</property>\n'
' <property name="is-square">True</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-video:video</feed>\n'
' </eater>\n'
' \n'
' <property name="cc-logo">True</property>\n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio:audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="bandwidth-limit">123000000</property>\n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testOndemand(self):
save = AssistantSaver()
server = HTTPServer('ondemand-server-worker', '/mount-point/')
save.addServerConsumer(server, 'ondemand')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-ondemand"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="ondemand-server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount-point/</property>\n'
' </component>\n'
' </atmosphere>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
class TestNameConflicts(testsuite.TestCase):
def setUp(self):
self.save = AssistantSaver()
def _addServer(self, name):
server = HTTPServer('ondemand-server-worker', '/mount-point/')
self.save.addServerConsumer(server, name)
def testNameConflicts(self):
self.save.setExistingComponentNames(['http-server-ondemand'])
self._addServer('ondemand')
self.save.getXML()
components = self.save.getAtmosphereComponents()
self.assertEquals(components[0].name, 'http-server-ondemand2')
def testNameConflictsDoubleDigits(self):
componentNames = ['http-server-ondemand'] + [
'http-server-ondemand%d' % i for i in range(2, 10)]
self.save.setExistingComponentNames(componentNames)
self._addServer('ondemand')
self.save.getXML()
components = self.save.getAtmosphereComponents()
self.assertEquals(components[0].name, 'http-server-ondemand10')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -6,053,539,841,310,184,000 | 42.92986 | 78 | 0.467543 | false |
mikicaivosevic/flask-cassandra-sessions | tests/test_session.py | 1 | 2092 | import json
from time import sleep
from unittest import TestCase
from cassandra.cluster import Cluster
from flask import Flask, session
from cassandra_flask_sessions import CassandraSessionInterface, AbstractConnectionProvider
class ConnectionProvider(AbstractConnectionProvider):
_connection = Cluster(['127.0.0.1']).connect('tests')
def get_connection(self):
return self._connection
_app = Flask(__name__)
_app.session_interface = CassandraSessionInterface(ConnectionProvider())
@_app.route('/set/<name>')
def set_session(name):
session['name'] = name
return 'ok'
@_app.route('/get')
def get_session():
return json.dumps(dict(session))
@_app.route('/delete')
def delete_session():
session.clear()
return 'ok'
_app.testing = True
_app.app_context().push()
class TestCassandraSessionInterface(TestCase):
def test_set_get_delete(self):
name = 'Mikica'
with _app.test_client() as client:
session_data = client.get('/set/%s' % name)
client.set_cookie('localhost', session_data.headers[2][0], session_data.headers[2][1])
session_data = client.get('/get')
self.assertEqual(json.dumps({'name': name}), session_data.data)
client.get('/delete')
session_data = client.get('/get')
self.assertEqual('{}', session_data.data)
def test_lifetime_interval(self):
name = 'Mikica'
session_lifetime = _app.config['PERMANENT_SESSION_LIFETIME']
_app.config.update({'PERMANENT_SESSION_LIFETIME': 1})
with _app.test_client() as client:
session_data = client.get('/set/%s' % name)
client.set_cookie('localhost', session_data.headers[2][0], session_data.headers[2][1])
session_data = client.get('/get')
self.assertEqual(json.dumps({'name': name}), session_data.data)
sleep(2)
session_data = client.get('/get')
self.assertEqual('{}', session_data.data)
_app.config.update({'PERMANENT_SESSION_LIFETIME': session_lifetime})
| mit | -7,179,838,590,307,754,000 | 26.168831 | 98 | 0.639101 | false |
PapenfussLab/MHC-clogs | bin/fasta_rename_by_pos.py | 1 | 1734 | #!/usr/bin/env python
"""
fasta_rename_by_pos.py
"""
from argparse import ArgumentParser
from mungolite.fasta import FastaFile
from srt.intervals import GenomeIntersector
def get_position(tokens):
for token in tokens:
if "Position" in token:
for delim in ["=", ":", "(", ")"]:
token = token.replace(delim, " ")
tokens2 = token.split()
chrom = tokens2[1]
pos = tokens2[2]
strand = tokens2[3]
start, end = [int(x) for x in pos.split("-")]
return chrom, start, end, strand
raise Exception("Could not find position")
parser = ArgumentParser()
parser.add_argument("data_filename", type=str, help="Data filename")
parser.add_argument("input_filename", type=str, help="Input filename")
parser.add_argument("output_filename", type=str, help="Output filename")
args = parser.parse_args()
intersector = GenomeIntersector()
# >mdUT1 Chain=chain39 Position=1:705246778-705258088(+) GeneID=None ProteinID=None Score=82.8 E-value=6.8e-24 Length=11311 Comment=No overlapping annotations
for h,s in FastaFile(args.data_filename):
tokens = h.split()
name = tokens[0]
chrom, start, end, strand = get_position(tokens)
intersector.add((chrom, strand), start, end, name)
output_filename = FastaFile(args.output_filename, "w")
for h,s in FastaFile(args.input_filename):
tokens = h.split()
name = tokens[0]
chrom, start, end, strand = get_position(tokens)
rs = intersector.find((chrom, strand), start, end)
if rs:
new_name = rs[0].value
print "fasta_rename_UTs_by_pos.py:", name, new_name
h = "%s %s" % (new_name, " ".join(tokens[1:]))
output_filename.write(h, s)
| artistic-2.0 | -8,437,597,413,394,435,000 | 30.527273 | 158 | 0.643022 | false |
ibab/tensorprob | tensorprob/utilities.py | 1 | 1469 | from collections import defaultdict, Iterable
import itertools
import numpy as np
import tensorflow as tf
from six.moves import zip_longest
NAME_COUNTERS = defaultdict(lambda: 0)
def generate_name(obj):
"""Generate a unique name for the object in question
Returns a name of the form "{calling_class_name}_{count}"
"""
global NAME_COUNTERS
calling_name = obj.__name__
NAME_COUNTERS[calling_name] += 1
return '{0}_{1}'.format(calling_name, NAME_COUNTERS[calling_name])
class classproperty(object):
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter(owner)
def grouper(iterable, n=2, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def flatten(l):
"""Recursivly flattens a interable argument, ignoring strings and bytes.
Taken from: http://stackoverflow.com/a/2158532
"""
for el in l:
if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
for sub in flatten(el):
yield sub
else:
yield el
def is_finite(obj):
return isinstance(obj, tf.Tensor) or np.isfinite(obj)
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
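# Illustrative results of the helpers above (worked out by hand):
#   list(grouper('ABCDE', 2, 'x'))     -> [('A', 'B'), ('C', 'D'), ('E', 'x')]
#   list(pairwise([1, 2, 3]))          -> [(1, 2), (2, 3)]
#   list(flatten([1, [2, [3, 'ab']]])) -> [1, 2, 3, 'ab']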
| mit | 3,968,733,909,219,394,000 | 23.081967 | 76 | 0.639891 | false |
rseedorff/pi-kiosk | helloWorld.py | 1 | 5178 | from flask import Flask, jsonify
import datetime
import time
import atexit
import sys
import RPi.GPIO as GPIO
app = Flask(__name__)
#####################################################
# Initialise GPIO Board and setup all pins #
#####################################################
# Green LED at Pin 7
LED_GREEN = 7
LED_RED = 11
PIR = 12
GPIO.setmode(GPIO.BOARD) ## Use board pin numbering
GPIO.setup(LED_GREEN, GPIO.OUT, initial=GPIO.LOW) ## Setup GPIO Pin LED_GREEN to OUT (3.3V)
GPIO.setup(LED_RED, GPIO.OUT, initial=GPIO.LOW) ## Setup GPIO Pin LED_RED to OUT (3.3V)
GPIO.setup(PIR, GPIO.IN) ## Setup GPIO Pin PIR to IN
# Initialise PIR states
STATE_PIR_CURRENT = 0
STATE_PIR_LAST = 0
#####################################################
# REST Services #
#####################################################
# This route will return an object in JSON format
@app.route('/api/pir')
def pir():
    global STATE_PIR_CURRENT, STATE_PIR_LAST  # module-level state, assigned below
    try:
        print "%s: Initialising sensor ..." % datetime.datetime.now()
        # wait for the PIR sensor output to settle (go LOW)
        while (GPIO.input(PIR) == GPIO.HIGH):
            STATE_PIR_CURRENT = 0
        print "%s: Done! Waiting for movement ..." % datetime.datetime.now()
        for i in range(0, 500):
            STATE_PIR_CURRENT = GPIO.input(PIR)
            print "Iteration " + str(i+1) + " current state: " + str(STATE_PIR_CURRENT)
            if (STATE_PIR_CURRENT == 1 and STATE_PIR_LAST == 0):
                print "%s: Movement detected! ..." % datetime.datetime.now()
                STATE_PIR_LAST = 1
            elif (STATE_PIR_CURRENT == 0 and STATE_PIR_LAST == 1):
                print "%s: Movement ended! ..." % datetime.datetime.now()
                STATE_PIR_LAST = 0
            time.sleep(0.05)  ## poll every 50 ms
        print " Done !"
    except KeyboardInterrupt:
        print "exit ..."
        GPIO.cleanup()
    return jsonify(result='Hello PIR !')
# This route will return an object in JSON format
@app.route('/')
def index():
now = datetime.datetime.now()
return jsonify(result='Hello World !')
# This route will turn on the green LED
@app.route('/api/led/on')
def led_on():
try:
if ( GPIO.input(LED_GREEN) == GPIO.LOW ):
print "Turn LED_GREEN 'ON' at PIN: '"+ str(LED_GREEN) +"' !"
GPIO.output(LED_GREEN, True) ## Turn on GPIO pin LED_GREEN, if it's off
else:
print "LED_GREEN is already 'ON' at PIN: '"+ str(LED_GREEN) +"' !"
except:
## do some logging...
GPIO.cleanup()
print "Unexpected error: ", sys.exc_info()[0]
return jsonify(led='on', pin=LED_GREEN)
# This route will turn off the green LED
@app.route('/api/led/off')
def led_off():
try:
if ( GPIO.input(LED_GREEN) == GPIO.HIGH ):
print "Turn LED_GREEN 'OFF' at PIN: '"+ str(LED_GREEN) +"' !"
GPIO.output(LED_GREEN, False) ## Turn off GPIO pin LED_GREEN, if it's on
else:
print "LED_GREEN is already 'OFF' at PIN: '"+ str(LED_GREEN) +"' !"
except:
## do some logging...
GPIO.cleanup()
print "Unexpected error: ", sys.exc_info()[0]
return jsonify(led='off', pin=LED_GREEN)
# This route will toggle the green LED
@app.route('/api/led/toggle')
def toggle():
result = 'Hello Toggle !'
try:
if ( GPIO.input(LED_GREEN) == GPIO.HIGH ):
print "Toggle LED_GREEN ON!"
GPIO.output(LED_GREEN, False) ## Turn off GPIO pin 7, if it's on
result = 'Pin number 7 turned off (was on)'
else:
print "Toggle LED_GREEN OFF !"
GPIO.output(LED_GREEN, True) ## Turn on GPIO pin 7, if it's off
result = 'Pin number 7 turned on (was off)'
except:
## do some logging...
now = datetime.datetime.now()
GPIO.cleanup()
print "Exception!"
return jsonify(result=result, led=GPIO.input(LED_GREEN), pin=LED_GREEN)
# This route will blink the green LED a given number of times
@app.route('/api/led/blink')
@app.route('/api/led/blink/<float:speed>/')
@app.route('/api/led/blink/<float:speed>/<int:numTimes>')
def blink(speed=0.1, numTimes=50):
try:
for i in range(0, numTimes):
print "Iteration " + str(i+1)
GPIO.output(LED_GREEN, True) ## Turn on GPIO pin LED_GREEN
time.sleep(speed) ## Wait for sleep seconds
GPIO.output(LED_GREEN, False) ## Turn off GPIO pin LED_GREEN
time.sleep(speed) ## Wait for sleep seconds
print " Done "
except:
## do some logging...
now = datetime.datetime.now()
GPIO.cleanup()
print "Exception!"
return jsonify(result="Blinking", led=GPIO.input(LED_GREEN), pin=LED_GREEN)
@app.errorhandler(Exception)
def catch_all_exception_handler(error):
GPIO.cleanup() ## On error cleanup all GPIO Pins (hard reset)!
return 'Error', 500
def cleanup():
GPIO.cleanup() ## On shutdown clean all GPIO Pins!
print "Cleanup due to shutdown this server!"
if __name__ == '__main__':
    atexit.register(cleanup)  # register before app.run(), which blocks until shutdown
    app.debug = True
    app.run()
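# Example requests once the server is running (Flask's default host/port;
# adjust to your deployment):
#   curl http://127.0.0.1:5000/api/led/on
#   curl http://127.0.0.1:5000/api/led/toggle
#   curl http://127.0.0.1:5000/api/led/blink/0.2/10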
| apache-2.0 | -4,223,882,474,366,889,000 | 31.772152 | 91 | 0.556779 | false |
Austriker/LuxLogger | tsl2561.py | 1 | 10465 | '''Driver for the TSL2561 digital luminosity (light) sensors.
Pick one up at http://www.adafruit.com/products/439
Adafruit invests time and resources providing this open source code,
please support Adafruit and open-source hardware by purchasing
products from Adafruit!
Code ported from Adafruit Arduino library,
commit ced9f731da5095988cd66158562c2fde659e0510:
https://github.com/adafruit/Adafruit_TSL2561
'''
import time
from adafruit_i2c import Adafruit_I2C
from constants import *
import json
__author__ = 'Hugo SERRAT'
__credits__ = [
'K.Townsend (Adafruit Industries)',
'Georges Toth <[email protected]>'
]
__license__ = 'BSD'
__version__ = 'v3.1'
'''HISTORY
v3.1 - Removed exception when sensor is saturated
v3.0 - Rewrote the i2c lib to make it work with python3
v2.0 - Rewrote driver for Adafruit_Sensor and Auto-Gain support, and
added lux clipping check (returns 0 lux on sensor saturation)
v1.0 - First release (previously TSL2561)
'''
class TSL2561(object):
'''Driver for the TSL2561 digital luminosity (light) sensors.'''
def __init__(self, address=None,
integration_time=TSL2561_DELAY_INTTIME_402MS,
gain=TSL2561_GAIN_1X, autogain=False, debug=False):
if address is not None:
self.address = address
else:
self.address = TSL2561_ADDR_FLOAT
self.i2c = Adafruit_I2C(self.address)
self.debug = debug
self.integration_time = integration_time
self.gain = gain
self.autogain = autogain
self._begin()
def _begin(self):
'''Initializes I2C and configures the sensor (call this function before
doing anything else)
'''
# Make sure we're actually connected
x = self.i2c.readU8(TSL2561_REGISTER_ID)
if not x & 0x0A:
raise Exception('TSL2561 not found!')
##########
# Set default integration time and gain
self.set_integration_time(self.integration_time)
self.set_gain(self.gain)
# Note: by default, the device is in power down mode on bootup
self.disable()
def enable(self):
'''Enable the device by setting the control bit to 0x03'''
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_CONTROL,
TSL2561_CONTROL_POWERON)
def disable(self):
'''Disables the device (putting it in lower power sleep mode)'''
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_CONTROL,
TSL2561_CONTROL_POWEROFF)
@staticmethod
def delay(value):
'''Delay times must be specified in milliseconds but as the python
sleep function only takes (float) seconds we need to convert the sleep
time first
'''
time.sleep(value / 1000.0)
def _get_data(self):
'''Private function to read luminosity on both channels'''
# Enable the device by setting the control bit to 0x03
self.enable()
# Wait x ms for ADC to complete
TSL2561.delay(self.integration_time)
# Reads a two byte value from channel 0 (visible + infrared)
broadband = self.i2c.readU16(TSL2561_COMMAND_BIT | TSL2561_WORD_BIT |
TSL2561_REGISTER_CHAN0_LOW)
# Reads a two byte value from channel 1 (infrared)
ir = self.i2c.readU16(TSL2561_COMMAND_BIT | TSL2561_WORD_BIT |
TSL2561_REGISTER_CHAN1_LOW)
# Turn the device off to save power
self.disable()
return (broadband, ir)
def set_integration_time(self, integration_time):
'''Sets the integration time for the TSL2561'''
# Enable the device by setting the control bit to 0x03
self.enable()
self.integration_time = integration_time
# Update the timing register
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_TIMING,
self.integration_time | self.gain)
# Turn the device off to save power
self.disable()
def set_gain(self, gain):
'''Adjusts the gain on the TSL2561 (adjusts the sensitivity to light)
'''
# Enable the device by setting the control bit to 0x03
self.enable()
self.gain = gain
# Update the timing register
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_TIMING,
self.integration_time | self.gain)
# Turn the device off to save power
self.disable()
def set_auto_range(self, value):
'''Enables or disables the auto-gain settings when reading
data from the sensor
'''
self.autogain = value
def _get_luminosity(self):
'''Gets the broadband (mixed lighting) and IR only values from
the TSL2561, adjusting gain if auto-gain is enabled
'''
valid = False
# If Auto gain disabled get a single reading and continue
if not self.autogain:
return self._get_data()
# Read data until we find a valid range
_agcCheck = False
broadband = 0
ir = 0
while not valid:
if self.integration_time == TSL2561_INTEGRATIONTIME_13MS:
_hi = TSL2561_AGC_THI_13MS
_lo = TSL2561_AGC_TLO_13MS
elif self.integration_time == TSL2561_INTEGRATIONTIME_101MS:
_hi = TSL2561_AGC_THI_101MS
_lo = TSL2561_AGC_TLO_101MS
else:
_hi = TSL2561_AGC_THI_402MS
_lo = TSL2561_AGC_TLO_402MS
_b, _ir = self._get_data()
# Run an auto-gain check if we haven't already done so ...
if not _agcCheck:
if _b < _lo and self.gain == TSL2561_GAIN_1X:
# Increase the gain and try again
self.set_gain(TSL2561_GAIN_16X)
# Drop the previous conversion results
_b, _ir = self._get_data()
# Set a flag to indicate we've adjusted the gain
_agcCheck = True
elif _b > _hi and self.gain == TSL2561_GAIN_16X:
# Drop gain to 1x and try again
self.set_gain(TSL2561_GAIN_1X)
# Drop the previous conversion results
_b, _ir = self._get_data()
# Set a flag to indicate we've adjusted the gain
_agcCheck = True
else:
# Nothing to look at here, keep moving ....
# Reading is either valid, or we're already at the chips
# limits
broadband = _b
ir = _ir
valid = True
else:
# If we've already adjusted the gain once, just return the new
# results.
# This avoids endless loops where a value is at one extreme
                # pre-gain, and at the other extreme post-gain
broadband = _b
ir = _ir
valid = True
return (broadband, ir)
def _calculate_lux(self, broadband, ir):
'''Converts the raw sensor values to the standard SI lux equivalent.
Returns 0 if the sensor is saturated and the values are unreliable.
'''
# Make sure the sensor isn't saturated!
if self.integration_time == TSL2561_INTEGRATIONTIME_13MS:
clipThreshold = TSL2561_CLIPPING_13MS
elif self.integration_time == TSL2561_INTEGRATIONTIME_101MS:
clipThreshold = TSL2561_CLIPPING_101MS
else:
clipThreshold = TSL2561_CLIPPING_402MS
# Return max value 65535 lux if the sensor is saturated
if broadband > clipThreshold or ir > clipThreshold:
return 65535
# Get the correct scale depending on the integration time
if self.integration_time == TSL2561_INTEGRATIONTIME_13MS:
chScale = TSL2561_LUX_CHSCALE_TINT0
elif self.integration_time == TSL2561_INTEGRATIONTIME_101MS:
chScale = TSL2561_LUX_CHSCALE_TINT1
else:
chScale = 1 << TSL2561_LUX_CHSCALE
# Scale for gain (1x or 16x)
if not self.gain:
chScale = chScale << 4
# Scale the channel values
channel0 = (broadband * chScale) >> TSL2561_LUX_CHSCALE
channel1 = (ir * chScale) >> TSL2561_LUX_CHSCALE
# Find the ratio of the channel values (Channel1/Channel0)
ratio1 = 0
if channel0 != 0:
ratio1 = (channel1 << (TSL2561_LUX_RATIOSCALE + 1)) // channel0
# round the ratio value
ratio = (ratio1 + 1) >> 1
b = 0
m = 0
if ratio >= 0 and ratio <= TSL2561_LUX_K1T:
b = TSL2561_LUX_B1T
m = TSL2561_LUX_M1T
elif ratio <= TSL2561_LUX_K2T:
b = TSL2561_LUX_B2T
m = TSL2561_LUX_M2T
elif ratio <= TSL2561_LUX_K3T:
b = TSL2561_LUX_B3T
m = TSL2561_LUX_M3T
elif ratio <= TSL2561_LUX_K4T:
b = TSL2561_LUX_B4T
m = TSL2561_LUX_M4T
elif ratio <= TSL2561_LUX_K5T:
b = TSL2561_LUX_B5T
m = TSL2561_LUX_M5T
elif ratio <= TSL2561_LUX_K6T:
b = TSL2561_LUX_B6T
m = TSL2561_LUX_M6T
elif ratio <= TSL2561_LUX_K7T:
b = TSL2561_LUX_B7T
m = TSL2561_LUX_M7T
elif ratio > TSL2561_LUX_K8T:
b = TSL2561_LUX_B8T
m = TSL2561_LUX_M8T
temp = (channel0 * b) - (channel1 * m)
# Do not allow negative lux value
if temp < 0:
temp = 0
# Round lsb (2^(LUX_SCALE-1))
temp += 1 << (TSL2561_LUX_LUXSCALE - 1)
# Strip off fractional portion
lux = temp >> TSL2561_LUX_LUXSCALE
# Signal I2C had no errors
return lux
def lux(self):
'''Read sensor data, convert it to LUX and return it'''
broadband, ir = self._get_luminosity()
return self._calculate_lux(broadband, ir)
def getLuminosityDict(self):
data = {}
data['broadband'], data['ir'] = self._get_luminosity()
data['lux'] = self._calculate_lux(data['broadband'], data['ir'])
return data
def getLuminosityJson(self):
return json.dumps(self.getLuminosityDict())
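# Minimal usage sketch (assumes the sensor is wired to the I2C bus with its
# address pin floating, i.e. TSL2561_ADDR_FLOAT from constants.py):
#   if __name__ == "__main__":
#       tsl = TSL2561(autogain=True)
#       print(tsl.lux())
#       print(tsl.getLuminosityJson())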
| mit | -6,918,371,752,110,139,000 | 32.758065 | 79 | 0.57592 | false |
loaclhostjason/react-redux-admin | console/app/app.py | 1 | 1394 | # -*- coding: utf-8 -*-
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_babel import Babel
from flask_moment import Moment
from .assets import assets_env, bundles
from flask_caching import Cache
from config import Config
bootstrap = Bootstrap()
db = SQLAlchemy()
babel = Babel()
moment = Moment()
cache = Cache()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
# message in warning, error, success
login_manager.login_message = {'warning': "您还未登录!"}
login_manager.login_view = 'auth.login'
def create_app():
app = Flask(__name__)
app.config.from_object(Config)
Config.init_app(app)
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
babel.init_app(app)
moment.init_app(app)
assets_env.init_app(app)
assets_env.register(bundles)
cache.init_app(app)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .deploy import deploy as deploy_blueprint
app.register_blueprint(deploy_blueprint, url_prefix='/deploy')
from .api import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/api')
return app
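# Typical entry-point usage (module path and run options are hypothetical):
#   from app.app import create_app
#   app = create_app()
#   app.run(debug=True)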
| mit | 5,873,634,761,814,845,000 | 24.62963 | 66 | 0.723988 | false |
afajl/sy | docs/conf.py | 1 | 6337 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
import sy
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.todo', 'sphinx.ext.ifconfig']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'latin1'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sy'
copyright = u'2009, Paul Diaconescu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['sy.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sy.tex', u'sy documentation',
u'Paul Diaconescu', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| bsd-3-clause | 8,141,396,965,501,481,000 | 31.167513 | 80 | 0.709957 | false |
erudit/zenon | eruditorg/apps/userspace/library/subscription_information/views.py | 1 | 1593 | # -*- coding: utf-8 -*-
from django.contrib import messages
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import FormView
from django.contrib.auth.mixins import LoginRequiredMixin
from base.viewmixins import MenuItemMixin
from ..viewmixins import OrganisationScopePermissionRequiredMixin
from .forms import SubscriptionInformationForm
class SubscriptionInformationUpdateView(
LoginRequiredMixin, OrganisationScopePermissionRequiredMixin, MenuItemMixin, FormView):
form_class = SubscriptionInformationForm
menu_library = 'subscription_information'
permission_required = 'library.has_access_to_dashboard'
template_name = 'userspace/library/subscription_information/update.html'
def get_form_kwargs(self):
kwargs = super(SubscriptionInformationUpdateView, self).get_form_kwargs()
kwargs.update({'organisation': self.current_organisation})
return kwargs
def form_valid(self, form):
form.save()
return super(SubscriptionInformationUpdateView, self).form_valid(form)
def get_success_url(self):
messages.success(
self.request, _("Le logo institutionnel a été mis à jour avec succès."))
return reverse(
'userspace:library:subscription_information:update',
args=(self.current_organisation.pk, ))
def get_context_data(self, **kwargs):
context = super(SubscriptionInformationUpdateView, self).get_context_data(**kwargs)
context['section_aside'] = True
return context
| gpl-3.0 | -7,446,036,456,903,838,000 | 36.833333 | 95 | 0.735053 | false |
lebauce/artub | pypoujol/sound.py | 1 | 1742 | # Glumol - An adventure game creator
# Copyright (C) 1998-2008 Sylvain Baubeau & Alexis Contour
# This file is part of Glumol.
# Glumol is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# Glumol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Glumol. If not, see <http://www.gnu.org/licenses/>.
class Sound(object):
def __glumolinit__(self):
pass
def __init__(self, filename):
pass
def play(self):
pass
def stop(self):
pass
def get_playing(self):
return False
def set_playing(self, state):
pass
playing = property(get_playing, set_playing)
def get_echo(self):
return 0
def set_echo(self, echo):
pass
echo = property(get_echo, set_echo)
def get_invert_echo(self):
return False
def set_invert_echo(self, invert_echo):
pass
invert_echo = property(get_invert_echo, set_invert_echo)
def fade_to_volume(self, volume, duration):
pass
def get_volume(self):
return 1.0
def set_volume(self, volume):
pass
volume = property(get_volume, set_volume)
| gpl-2.0 | -4,415,843,273,799,223,300 | 24.757576 | 70 | 0.598163 | false |
jeffmahoney/supybot | src/commands.py | 2 | 33100 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009-2010, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Includes wrappers for commands.
"""
import time
import Queue
import types
import getopt
import inspect
import threading
import multiprocessing
from . import callbacks, conf, ircdb, ircmsgs, ircutils, log, utils, world
###
# Non-arg wrappers -- these just change the behavior of a command without
# changing the arguments given to it.
###
# Thread has to be a non-arg wrapper because by the time we're parsing and
# validating arguments, we're inside the function we'd want to thread.
def thread(f):
"""Makes sure a command spawns a thread when called."""
def newf(self, irc, msg, args, *L, **kwargs):
if world.isMainThread():
targetArgs = (self.callingCommand, irc, msg, args) + tuple(L)
t = callbacks.CommandThread(target=self._callCommand,
args=targetArgs, kwargs=kwargs)
t.start()
else:
f(self, irc, msg, args, *L, **kwargs)
return utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
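# Illustrative plugin usage (hypothetical plugin code, not part of this
# module); the usual idiom threads the already-wrapped command so each
# invocation runs off the main thread. wrap() is defined later in this file.
#
#     def headers(self, irc, msg, args, url):
#         """<url>
#
#         Fetches <url> without blocking the bot."""
#         irc.reply(utils.web.getUrl(url))
#     headers = thread(wrap(headers, ['url']))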
class ProcessTimeoutError(Exception):
"""Gets raised when a process is killed due to timeout."""
pass
def process(f, *args, **kwargs):
"""Runs a function <f> in a subprocess.
Several extra keyword arguments can be supplied.
    <pn>, the plugin name, and <cn>, the command name, are strings used to
create the process name, for identification purposes.
<timeout>, if supplied, limits the length of execution of target
function to <timeout> seconds."""
timeout = kwargs.pop('timeout', None)
q = multiprocessing.Queue()
def newf(f, q, *args, **kwargs):
try:
r = f(*args, **kwargs)
q.put(r)
except Exception as e:
q.put(e)
targetArgs = (f, q,) + args
p = callbacks.CommandProcess(target=newf,
args=targetArgs, kwargs=kwargs)
p.start()
p.join(timeout)
if p.is_alive():
p.terminate()
raise ProcessTimeoutError, "%s aborted due to timeout." % (p.name,)
try:
v = q.get(block=False)
except Queue.Empty:
v = "Nothing returned."
if isinstance(v, Exception):
v = "Error: " + str(v)
return v
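# Illustrative call (f and its arguments are placeholders): run f(1, 2) in a
# subprocess, raising ProcessTimeoutError if it is still alive after 10
# seconds; pn/cn only label the process for logging.
#
#     result = process(f, 1, 2, timeout=10, pn='MyPlugin', cn='mycommand')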
def regexp_wrapper(s, reobj, timeout, plugin_name, fcn_name):
'''A convenient wrapper to stuff regexp search queries through a subprocess.
This is used because specially-crafted regexps can use exponential time
and hang the bot.'''
def re_bool(s, reobj):
"""Since we can't enqueue match objects into the multiprocessing queue,
we'll just wrap the function to return bools."""
if reobj.search(s) is not None:
return True
else:
return False
try:
v = process(re_bool, s, reobj, timeout=timeout, pn=plugin_name, cn=fcn_name)
return v
except ProcessTimeoutError:
return False
class UrlSnarfThread(world.SupyThread):
def __init__(self, *args, **kwargs):
assert 'url' in kwargs
kwargs['name'] = 'Thread #%s (for snarfing %s)' % \
(world.threadsSpawned, kwargs.pop('url'))
super(UrlSnarfThread, self).__init__(*args, **kwargs)
self.setDaemon(True)
def run(self):
try:
super(UrlSnarfThread, self).run()
except utils.web.Error, e:
log.debug('Exception in urlSnarfer: %s', utils.exnToString(e))
class SnarfQueue(ircutils.FloodQueue):
timeout = conf.supybot.snarfThrottle
def key(self, channel):
return channel
_snarfed = SnarfQueue()
class SnarfIrc(object):
def __init__(self, irc, channel, url):
self.irc = irc
self.url = url
self.channel = channel
def __getattr__(self, attr):
return getattr(self.irc, attr)
def reply(self, *args, **kwargs):
_snarfed.enqueue(self.channel, self.url)
return self.irc.reply(*args, **kwargs)
# This lock is used to serialize the calls to snarfers, so
# earlier snarfers are guaranteed to beat out later snarfers.
_snarfLock = threading.Lock()
def urlSnarfer(f):
"""Protects the snarfer from loops (with other bots) and whatnot."""
def newf(self, irc, msg, match, *L, **kwargs):
url = match.group(0)
channel = msg.args[0]
if not irc.isChannel(channel) or (ircmsgs.isCtcp(msg) and not
ircmsgs.isAction(msg)):
return
if ircdb.channels.getChannel(channel).lobotomized:
self.log.debug('Not snarfing in %s: lobotomized.', channel)
return
if _snarfed.has(channel, url):
self.log.info('Throttling snarf of %s in %s.', url, channel)
return
irc = SnarfIrc(irc, channel, url)
def doSnarf():
_snarfLock.acquire()
try:
# This has to be *after* we've acquired the lock so we can be
# sure that all previous urlSnarfers have already run to
# completion.
if msg.repliedTo:
self.log.debug('Not snarfing, msg is already repliedTo.')
return
f(self, irc, msg, match, *L, **kwargs)
finally:
_snarfLock.release()
if threading.currentThread() is not world.mainThread:
doSnarf()
else:
L = list(L)
t = UrlSnarfThread(target=doSnarf, url=url)
t.start()
newf = utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
return newf
###
# Converters, which take irc, msg, args, and a state object, and build up the
# validated and converted args for the method in state.args.
###
# This is just so we can centralize this, since it may change.
def _int(s):
base = 10
if s.startswith('0x'):
base = 16
s = s[2:]
elif s.startswith('0b'):
base = 2
s = s[2:]
elif s.startswith('0') and len(s) > 1:
base = 8
s = s[1:]
try:
return int(s, base)
except ValueError:
if base == 10:
try:
return int(float(s))
except OverflowError:
raise ValueError('I don\'t understand numbers that large.')
else:
raise
def getInt(irc, msg, args, state, type='integer', p=None):
try:
i = _int(args[0])
if p is not None:
if not p(i):
state.errorInvalid(type, args[0])
state.args.append(i)
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getNonInt(irc, msg, args, state, type='non-integer value'):
try:
i = _int(args[0])
state.errorInvalid(type, args[0])
except ValueError:
state.args.append(args.pop(0))
def getLong(irc, msg, args, state, type='long'):
getInt(irc, msg, args, state, type)
state.args[-1] = long(state.args[-1])
def getFloat(irc, msg, args, state, type='floating point number'):
try:
state.args.append(float(args[0]))
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getPositiveInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>0, type='positive integer', *L)
def getNonNegativeInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>=0, type='non-negative integer', *L)
def getIndex(irc, msg, args, state):
getInt(irc, msg, args, state, type='index')
if state.args[-1] > 0:
state.args[-1] -= 1
def getId(irc, msg, args, state, kind=None):
type = 'id'
if kind is not None and not kind.endswith('id'):
type = kind + ' id'
original = args[0]
try:
args[0] = args[0].lstrip('#')
getInt(irc, msg, args, state, type=type)
except Exception, e:
args[0] = original
raise
def getExpiry(irc, msg, args, state):
now = int(time.time())
try:
expires = _int(args[0])
if expires:
expires += now
state.args.append(expires)
del args[0]
except ValueError:
state.errorInvalid('number of seconds', args[0])
def getBoolean(irc, msg, args, state):
try:
state.args.append(utils.str.toBool(args[0]))
del args[0]
except ValueError:
state.errorInvalid('boolean', args[0])
def getNetworkIrc(irc, msg, args, state, errorIfNoMatch=False):
if args:
for otherIrc in world.ircs:
if otherIrc.network.lower() == args[0].lower():
state.args.append(otherIrc)
del args[0]
return
if errorIfNoMatch:
raise callbacks.ArgumentError
else:
state.args.append(irc)
def getHaveOp(irc, msg, args, state, action='do that'):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not even in %s.' % state.channel, Raise=True)
if not irc.state.channels[state.channel].isOp(irc.nick):
state.error('I need to be opped to %s.' % action, Raise=True)
def validChannel(irc, msg, args, state):
if irc.isChannel(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('channel', args[0])
def getHostmask(irc, msg, args, state):
if ircutils.isUserHostmask(args[0]):
state.args.append(args.pop(0))
else:
try:
hostmask = irc.state.nickToHostmask(args[0])
state.args.append(hostmask)
del args[0]
except KeyError:
state.errorInvalid('nick or hostmask', args[0])
def getBanmask(irc, msg, args, state):
getHostmask(irc, msg, args, state)
getChannel(irc, msg, args, state)
channel = state.channel
banmaskstyle = conf.supybot.protocols.irc.banmask
state.args[-1] = banmaskstyle.makeBanmask(state.args[-1])
def getUser(irc, msg, args, state):
try:
state.args.append(ircdb.users.getUser(msg.prefix))
except KeyError:
state.errorNotRegistered(Raise=True)
def getOtherUser(irc, msg, args, state):
# Although ircdb.users.getUser could accept a hostmask, we're explicitly
# excluding that from our interface with this check
if ircutils.isUserHostmask(args[0]):
state.errorNoUser(args[0])
try:
state.args.append(ircdb.users.getUser(args[0]))
del args[0]
except KeyError:
try:
getHostmask(irc, msg, [args[0]], state)
hostmask = state.args.pop()
state.args.append(ircdb.users.getUser(hostmask))
del args[0]
except (KeyError, callbacks.Error):
state.errorNoUser(name=args[0])
def _getRe(f):
def get(irc, msg, args, state, convert=True):
original = args[:]
s = args.pop(0)
def isRe(s):
try:
_ = f(s)
return True
except ValueError:
return False
try:
while len(s) < 512 and not isRe(s):
s += ' ' + args.pop(0)
if len(s) < 512:
if convert:
state.args.append(f(s))
else:
state.args.append(s)
else:
state.errorInvalid('regular expression', s)
except IndexError:
args[:] = original
state.errorInvalid('regular expression', s)
return get
getMatcher = _getRe(utils.str.perlReToPythonRe)
getReplacer = _getRe(utils.str.perlReToReplacer)
def getNick(irc, msg, args, state):
if ircutils.isNick(args[0]):
if 'nicklen' in irc.state.supported:
if len(args[0]) > irc.state.supported['nicklen']:
state.errorInvalid('nick', args[0],
'That nick is too long for this server.')
state.args.append(args.pop(0))
else:
state.errorInvalid('nick', args[0])
def getSeenNick(irc, msg, args, state, errmsg=None):
try:
_ = irc.state.nickToHostmask(args[0])
state.args.append(args.pop(0))
except KeyError:
if errmsg is None:
errmsg = 'I haven\'t seen %s.' % args[0]
state.error(errmsg, Raise=True)
def getChannel(irc, msg, args, state):
if state.channel:
return
if args and irc.isChannel(args[0]):
channel = args.pop(0)
elif irc.isChannel(msg.args[0]):
channel = msg.args[0]
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.channel = channel
state.args.append(channel)
def getChannelDb(irc, msg, args, state, **kwargs):
channelSpecific = conf.supybot.databases.plugins.channelSpecific
try:
getChannel(irc, msg, args, state, **kwargs)
channel = channelSpecific.getChannelLink(state.channel)
state.channel = channel
state.args[-1] = channel
except (callbacks.ArgumentError, IndexError):
if channelSpecific():
raise
channel = channelSpecific.link()
if not conf.get(channelSpecific.link.allow, channel):
log.warning('channelSpecific.link is globally set to %s, but '
'%s disallowed linking to its db.', channel, channel)
raise
else:
channel = channelSpecific.getChannelLink(channel)
state.channel = channel
state.args.append(channel)
def inChannel(irc, msg, args, state):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not in %s.' % state.channel, Raise=True)
def onlyInChannel(irc, msg, args, state):
if not (irc.isChannel(msg.args[0]) and msg.args[0] in irc.state.channels):
state.error('This command may only be given in a channel that I am in.',
Raise=True)
else:
state.channel = msg.args[0]
state.args.append(state.channel)
def callerInGivenChannel(irc, msg, args, state):
channel = args[0]
if irc.isChannel(channel):
if channel in irc.state.channels:
if msg.nick in irc.state.channels[channel].users:
state.args.append(args.pop(0))
else:
state.error('You must be in %s.' % channel, Raise=True)
else:
state.error('I\'m not in %s.' % channel, Raise=True)
else:
state.errorInvalid('channel', args[0])
def nickInChannel(irc, msg, args, state):
originalArgs = state.args[:]
inChannel(irc, msg, args, state)
state.args = originalArgs
if args[0] not in irc.state.channels[state.channel].users:
state.error('%s is not in %s.' % (args[0], state.channel), Raise=True)
state.args.append(args.pop(0))
def getChannelOrNone(irc, msg, args, state):
try:
getChannel(irc, msg, args, state)
except callbacks.ArgumentError:
state.args.append(None)
def checkChannelCapability(irc, msg, args, state, cap):
getChannel(irc, msg, args, state)
cap = ircdb.canonicalCapability(cap)
cap = ircdb.makeChannelCapability(state.channel, cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def getOp(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'op')
def getHalfop(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'halfop')
def getVoice(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'voice')
def getLowered(irc, msg, args, state):
state.args.append(ircutils.toLower(args.pop(0)))
def getSomething(irc, msg, args, state, errorMsg=None, p=None):
if p is None:
p = lambda _: True
if not args[0] or not p(args[0]):
if errorMsg is None:
errorMsg = 'You must not give the empty string as an argument.'
state.error(errorMsg, Raise=True)
else:
state.args.append(args.pop(0))
def getSomethingNoSpaces(irc, msg, args, state, errorMsg=None):
def p(s):
return len(s.split(None, 1)) == 1
if errorMsg is None:
errorMsg='You must not give a string containing spaces as an argument.'
getSomething(irc, msg, args, state, errorMsg=errorMsg, p=p)
def private(irc, msg, args, state):
if irc.isChannel(msg.args[0]):
state.errorRequiresPrivacy(Raise=True)
def public(irc, msg, args, state, errmsg=None):
if not irc.isChannel(msg.args[0]):
if errmsg is None:
errmsg = 'This message must be sent in a channel.'
state.error(errmsg, Raise=True)
def checkCapability(irc, msg, args, state, cap):
cap = ircdb.canonicalCapability(cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def owner(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'owner')
def admin(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'admin')
def anything(irc, msg, args, state):
state.args.append(args.pop(0))
def getGlob(irc, msg, args, state):
glob = args.pop(0)
if '*' not in glob and '?' not in glob:
glob = '*%s*' % glob
state.args.append(glob)
def getUrl(irc, msg, args, state):
if utils.web.urlRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('url', args[0])
def getEmail(irc, msg, args, state):
if utils.net.emailRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('email', args[0])
def getHttpUrl(irc, msg, args, state):
if utils.web.httpUrlRe.match(args[0]):
state.args.append(args.pop(0))
elif utils.web.httpUrlRe.match('http://' + args[0]):
state.args.append('http://' + args.pop(0))
else:
state.errorInvalid('http url', args[0])
def getNow(irc, msg, args, state):
state.args.append(int(time.time()))
def getCommandName(irc, msg, args, state):
if ' ' in args[0]:
state.errorInvalid('command name', args[0])
else:
state.args.append(callbacks.canonicalName(args.pop(0)))
def getIp(irc, msg, args, state):
if utils.net.isIP(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('ip', args[0])
def getLetter(irc, msg, args, state):
if len(args[0]) == 1:
state.args.append(args.pop(0))
else:
state.errorInvalid('letter', args[0])
def getMatch(irc, msg, args, state, regexp, errmsg):
m = regexp.search(args[0])
if m is not None:
state.args.append(m)
del args[0]
else:
state.error(errmsg, Raise=True)
def getLiteral(irc, msg, args, state, literals, errmsg=None):
# ??? Should we allow abbreviations?
if isinstance(literals, basestring):
literals = (literals,)
abbrevs = utils.abbrev(literals)
if args[0] in abbrevs:
state.args.append(abbrevs[args.pop(0)])
elif errmsg is not None:
state.error(errmsg, Raise=True)
else:
raise callbacks.ArgumentError
def getTo(irc, msg, args, state):
if args[0].lower() == 'to':
args.pop(0)
def getPlugin(irc, msg, args, state, require=True):
cb = irc.getCallback(args[0])
if cb is not None:
state.args.append(cb)
del args[0]
elif require:
state.errorInvalid('plugin', args[0])
else:
state.args.append(None)
def getIrcColor(irc, msg, args, state):
if args[0] in ircutils.mircColors:
state.args.append(ircutils.mircColors[args.pop(0)])
else:
state.errorInvalid('irc color')
def getText(irc, msg, args, state):
if args:
state.args.append(' '.join(args))
args[:] = []
else:
raise IndexError
wrappers = ircutils.IrcDict({
'admin': admin,
'anything': anything,
'banmask': getBanmask,
'boolean': getBoolean,
'callerInGivenChannel': callerInGivenChannel,
'capability': getSomethingNoSpaces,
'channel': getChannel,
'channelDb': getChannelDb,
'checkCapability': checkCapability,
'checkChannelCapability': checkChannelCapability,
'color': getIrcColor,
'commandName': getCommandName,
'email': getEmail,
'expiry': getExpiry,
'filename': getSomething, # XXX Check for validity.
'float': getFloat,
'glob': getGlob,
'halfop': getHalfop,
'haveOp': getHaveOp,
'hostmask': getHostmask,
'httpUrl': getHttpUrl,
'id': getId,
'inChannel': inChannel,
'index': getIndex,
'int': getInt,
'ip': getIp,
'letter': getLetter,
'literal': getLiteral,
'long': getLong,
'lowered': getLowered,
'matches': getMatch,
'networkIrc': getNetworkIrc,
'nick': getNick,
'nickInChannel': nickInChannel,
'nonInt': getNonInt,
'nonNegativeInt': getNonNegativeInt,
'now': getNow,
'onlyInChannel': onlyInChannel,
'op': getOp,
'otherUser': getOtherUser,
'owner': owner,
'plugin': getPlugin,
'positiveInt': getPositiveInt,
'private': private,
'public': public,
'regexpMatcher': getMatcher,
'regexpReplacer': getReplacer,
'seenNick': getSeenNick,
'something': getSomething,
'somethingWithoutSpaces': getSomethingNoSpaces,
'text': getText,
'to': getTo,
'url': getUrl,
'user': getUser,
'validChannel': validChannel,
'voice': getVoice,
})
def addConverter(name, wrapper):
wrappers[name] = wrapper
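# Sketch of registering a custom converter (hypothetical name): a converter
# receives (irc, msg, args, state) plus any extra spec arguments, pops what
# it consumes from args, and appends the converted value to state.args.
#
#     def getShouted(irc, msg, args, state):
#         state.args.append(args.pop(0).upper())
#     addConverter('shouted', getShouted)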
class UnknownConverter(KeyError):
pass
def getConverter(name):
try:
return wrappers[name]
except KeyError, e:
raise UnknownConverter, str(e)
def callConverter(name, irc, msg, args, state, *L):
getConverter(name)(irc, msg, args, state, *L)
###
# Contexts. These determine the nature of conversions: whether they're
# defaulted, whether many of them are allowed, etc. Contexts should be
# reusable; i.e., they should not maintain state between calls.
###
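# For instance, a plugin command is typically wrapped with a spec list that
# names converters and contexts (using this module's wrap() helper, which is
# defined further down in this file); a hypothetical command:
#
#     def repeat(self, irc, msg, args, count, text):
#         """<count> <text>
#
#         Replies with <text> repeated <count> times."""
#         irc.reply(' '.join([text] * count))
#     repeat = wrap(repeat, ['positiveInt', 'text'])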
def contextify(spec):
if not isinstance(spec, context):
spec = context(spec)
return spec
def setDefault(state, default):
if callable(default):
state.args.append(default())
else:
state.args.append(default)
class context(object):
def __init__(self, spec):
self.args = ()
self.spec = spec # for repr
if isinstance(spec, tuple):
assert spec, 'tuple spec must not be empty.'
self.args = spec[1:]
self.converter = getConverter(spec[0])
elif spec is None:
self.converter = getConverter('anything')
elif isinstance(spec, basestring):
self.args = ()
self.converter = getConverter(spec)
else:
assert isinstance(spec, context)
self.converter = spec
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
self.converter(irc, msg, args, state, *self.args)
log.debug('args after %r: %r', self, args)
def __repr__(self):
return '<%s for %s>' % (self.__class__.__name__, self.spec)
class rest(context):
def __call__(self, irc, msg, args, state):
if args:
original = args[:]
args[:] = [' '.join(args)]
try:
super(rest, self).__call__(irc, msg, args, state)
except Exception, e:
args[:] = original
else:
raise IndexError
# additional means: Look for this (and make sure it's of this type). If
# there are no arguments for us to check, then use our default.
class additional(context):
def __init__(self, spec, default=None):
self.__parent = super(additional, self)
self.__parent.__init__(spec)
self.default = default
def __call__(self, irc, msg, args, state):
try:
self.__parent.__call__(irc, msg, args, state)
except IndexError:
log.debug('Got IndexError, returning default.')
setDefault(state, self.default)
# optional means: Look for this, but if it's not the type I'm expecting or
# there are no arguments for us to check, then use the default value.
class optional(additional):
def __call__(self, irc, msg, args, state):
try:
super(optional, self).__call__(irc, msg, args, state)
except (callbacks.ArgumentError, callbacks.Error), e:
log.debug('Got %s, returning default.', utils.exnToString(e))
state.errored = False
setDefault(state, self.default)
class any(context):
def __init__(self, spec, continueOnError=False):
self.__parent = super(any, self)
self.__parent.__init__(spec)
self.continueOnError = continueOnError
def __call__(self, irc, msg, args, state):
st = state.essence()
try:
while args:
self.__parent.__call__(irc, msg, args, st)
except IndexError:
pass
except (callbacks.ArgumentError, callbacks.Error), e:
if not self.continueOnError:
raise
else:
log.debug('Got %s, returning default.', utils.exnToString(e))
pass
state.args.append(st.args)
class many(any):
def __call__(self, irc, msg, args, state):
super(many, self).__call__(irc, msg, args, state)
if not state.args[-1]:
state.args.pop()
raise callbacks.ArgumentError
class first(context):
def __init__(self, *specs, **kw):
if 'default' in kw:
self.default = kw.pop('default')
assert not kw, 'Bad kwargs for first.__init__'
self.spec = specs # for __repr__
self.specs = map(contextify, specs)
def __call__(self, irc, msg, args, state):
errored = False
for spec in self.specs:
try:
spec(irc, msg, args, state)
return
except Exception, e:
errored = state.errored
state.errored = False
continue
if hasattr(self, 'default'):
state.args.append(self.default)
else:
state.errored = errored
raise e
class reverse(context):
def __call__(self, irc, msg, args, state):
args[:] = args[::-1]
super(reverse, self).__call__(irc, msg, args, state)
args[:] = args[::-1]
class commalist(context):
def __call__(self, irc, msg, args, state):
original = args[:]
st = state.essence()
trailingComma = True
try:
while trailingComma:
arg = args.pop(0)
if not arg.endswith(','):
trailingComma = False
for part in arg.split(','):
if part: # trailing commas
super(commalist, self).__call__(irc, msg, [part], st)
state.args.append(st.args)
except Exception, e:
args[:] = original
raise
class getopts(context):
"""The empty string indicates that no argument is taken; None indicates
that there is no converter for the argument."""
def __init__(self, getopts):
self.spec = getopts # for repr
self.getopts = {}
self.getoptL = []
for (name, spec) in getopts.iteritems():
if spec == '':
self.getoptL.append(name)
self.getopts[name] = None
else:
self.getoptL.append(name + '=')
self.getopts[name] = contextify(spec)
log.debug('getopts: %r', self.getopts)
log.debug('getoptL: %r', self.getoptL)
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
(optlist, rest) = getopt.getopt(args, '', self.getoptL)
getopts = []
for (opt, arg) in optlist:
opt = opt[2:] # Strip --
log.debug('opt: %r, arg: %r', opt, arg)
context = self.getopts[opt]
if context is not None:
st = state.essence()
context(irc, msg, [arg], st)
assert len(st.args) == 1
getopts.append((opt, st.args[0]))
else:
getopts.append((opt, True))
state.args.append(getopts)
args[:] = rest
log.debug('args after %r: %r', self, args)
###
# This is our state object, passed to converters along with irc, msg, and args.
###
class State(object):
log = log
def __init__(self, types):
self.args = []
self.kwargs = {}
self.types = types
self.channel = None
self.errored = False
def __getattr__(self, attr):
if attr.startswith('error'):
self.errored = True
return getattr(dynamic.irc, attr)
else:
raise AttributeError, attr
def essence(self):
st = State(self.types)
for (attr, value) in self.__dict__.iteritems():
if attr not in ('args', 'kwargs'):
setattr(st, attr, value)
return st
def __repr__(self):
return '%s(args=%r, kwargs=%r, channel=%r)' % (self.__class__.__name__,
self.args, self.kwargs,
self.channel)
###
# This is a compiled Spec object.
###
class Spec(object):
def _state(self, types, attrs={}):
st = State(types)
st.__dict__.update(attrs)
st.allowExtra = self.allowExtra
return st
def __init__(self, types, allowExtra=False):
self.types = types
self.allowExtra = allowExtra
utils.seq.mapinto(contextify, self.types)
def __call__(self, irc, msg, args, stateAttrs={}):
state = self._state(self.types[:], stateAttrs)
while state.types:
context = state.types.pop(0)
try:
context(irc, msg, args, state)
except IndexError:
raise callbacks.ArgumentError
if args and not state.allowExtra:
log.debug('args and not self.allowExtra: %r', args)
raise callbacks.ArgumentError
return state
def wrap(f, specList=[], name=None, **kw):
name = name or f.func_name
spec = Spec(specList, **kw)
def newf(self, irc, msg, args, **kwargs):
state = spec(irc, msg, args, stateAttrs={'cb': self, 'log': self.log})
self.log.debug('State before call: %s', state)
if state.errored:
self.log.debug('Refusing to call %s due to state.errored.', f)
else:
try:
f(self, irc, msg, args, *state.args, **state.kwargs)
except TypeError:
self.log.error('Spec: %s', specList)
self.log.error('Received args: %s', args)
code = f.func_code
funcArgs = inspect.getargs(code)[0][len(self.commandArgs):]
self.log.error('Extra args: %s', funcArgs)
raise
return utils.python.changeFunctionName(newf, name, f.__doc__)
__all__ = [
# Contexts.
'any', 'many',
'optional', 'additional',
'rest', 'getopts',
'first', 'reverse',
'commalist',
# Converter helpers.
'getConverter', 'addConverter', 'callConverter',
# Decorators.
'urlSnarfer', 'thread',
# Functions.
'wrap', 'process', 'regexp_wrapper',
# Stuff for testing.
'Spec',
]
# This doesn't work. Suck.
## if world.testing:
## __all__.append('Spec')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | 4,640,401,204,701,421,000 | 32.1 | 84 | 0.593505 | false |
akhileshpillai/treeherder | treeherder/webapp/api/runnable_jobs.py | 1 | 5374 | from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.status import HTTP_500_INTERNAL_SERVER_ERROR
from treeherder.etl.common import fetch_json
from treeherder.model import models
class RunnableJobsViewSet(viewsets.ViewSet):
"""
This viewset is responsible for the runnable_jobs endpoint.
"""
def list(self, request, project):
"""
GET method implementation for list of all runnable buildbot jobs
"""
decision_task_id = request.query_params.get('decisionTaskID')
if decision_task_id:
tc_graph_url = settings.TASKCLUSTER_TASKGRAPH_URL.format(task_id=decision_task_id)
tc_graph = None
validate = URLValidator()
try:
validate(tc_graph_url)
tc_graph = fetch_json(tc_graph_url)
except ValidationError:
# We pass here as we still want to schedule BuildBot jobs
pass
except Exception as ex:
return Response("Exception: {0}".format(ex), status=HTTP_500_INTERNAL_SERVER_ERROR)
else:
tc_graph = {}
repository = models.Repository.objects.get(name=project)
options_by_hash = models.OptionCollection.objects.all().select_related(
'option').values_list('option__name', 'option_collection_hash')
runnable_jobs = models.RunnableJob.objects.filter(
repository=repository
).select_related('build_platform', 'machine_platform',
'job_type', 'job_type__job_group')
ret = []
# Adding buildbot jobs
for datum in runnable_jobs:
options = ' '.join(option_name for (option_name, col_hash) in options_by_hash
if col_hash == datum.option_collection_hash)
ret.append({
'build_platform_id': datum.build_platform.id,
'build_platform': datum.build_platform.platform,
'build_os': datum.build_platform.os_name,
'build_architecture': datum.build_platform.architecture,
'machine_platform_id': datum.machine_platform.id,
'platform': datum.machine_platform.platform,
'machine_platform_os': datum.machine_platform.os_name,
'machine_platform_architecture': datum.machine_platform.architecture,
'job_group_id': datum.job_type.job_group.id,
'job_group_name': datum.job_type.job_group.name,
'job_group_symbol': datum.job_type.job_group.symbol,
'job_group_description': datum.job_type.job_group.description,
'job_type_id': datum.job_type.id,
'job_type_name': datum.job_type.name,
'job_type_symbol': datum.job_type.symbol,
'job_type_description': datum.job_type.description,
'option_collection_hash': datum.option_collection_hash,
'ref_data_name': datum.ref_data_name,
'build_system_type': datum.build_system_type,
'platform_option': options,
'job_coalesced_to_guid': None,
'state': 'runnable',
'result': 'runnable'})
for label, node in tc_graph.iteritems():
extra = node['task'].get('extra')
if not extra or not extra.get('treeherder'):
# some tasks don't have the treeherder information we need
# to be able to display them (and are not intended to be
# displayed). skip.
continue
treeherder_options = extra['treeherder']
task_metadata = node['task']['metadata']
build_platform = treeherder_options.get('machine', {}).get('platform', '')
# Not all tasks have a group name
job_group_name = treeherder_options.get('groupName', '')
# Not all tasks have a group symbol
job_group_symbol = treeherder_options.get('groupSymbol', '')
# Not all tasks have a collection
if 'collection' in treeherder_options:
platform_option = ' '.join(treeherder_options['collection'].keys())
else:
platform_option = ""
ret.append({
'build_platform': build_platform,
'platform': build_platform,
'job_group_name': job_group_name,
'job_group_symbol': job_group_symbol,
'job_type_name': task_metadata['name'],
'job_type_symbol': treeherder_options['symbol'],
'job_type_description': task_metadata['description'],
'ref_data_name': label,
'build_system_type': 'taskcluster',
'platform_option': platform_option,
'job_coalesced_to_guid': None,
'state': 'runnable',
'result': 'runnable'})
response_body = dict(meta={"repository": project,
"offset": 0,
"count": len(ret)},
results=ret)
return Response(response_body)
| mpl-2.0 | -607,443,222,029,147,600 | 42.691057 | 99 | 0.566617 | false |
avanzosc/avanzosc6.1 | avanzosc_tire_management/wizard/wizard_interchange.py | 1 | 15211 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011-2012 Daniel (Avanzosc) <http://www.avanzosc.com>
# 28/03/2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
import wizard
import pooler
import Image
class wizard_interchange (wizard.interface):
form1 = '''<?xml version="1.0"?>
<form string="Tire Change">
<field name="tire" width="250" height="50"/>
<separator string="Move Tire" colspan="4"/>
<field name="origin" width="250" height="50"/>
<field name="destination" width="250" height="50" domain="[('name','like','bus')]" />
<group string="Bus Location" colspan="4" attrs="{'invisible':[('destination','=','Tire Stock')]}">
<field name="locat" width="150" height="50" domain="[('location_id','=',destination)]" />
<field name="odometer" />
</group>
</form>'''
form1_fields = {
'tire': {
'string': 'Tire',
'type': 'many2one',
'relation': 'stock.production.lot',
'required': True,
'readonly': True
},
'origin': {
'string': 'Origin',
'type': 'many2one',
'relation': 'stock.location',
'required': True,
'readonly': True
},
'destination': {
'string': 'Destination',
'type': 'many2one',
'relation': 'stock.location',
'required': True
},
'locat': {
'string': 'Tire Location',
'type': 'many2one',
'relation': 'stock.location',
'required': True
},
'odometer': {
'string': 'Odometer',
'type': 'integer',
'required': True
},
}
form2 = '''<?xml version="1.0"?>
<form string="Tire move">
<label string="Location occupied! The chosen location already has a tire assigned, move it before assigning new one." colspan="4"/>
</form>'''
form2_fields = {}
form3 = '''<?xml version="1.0"?>
<form string="Tire move">
<separator string="Tire movement must be from a Vehicle!" colspan="4"/>
</form>'''
form3_fields = {}
form4 = '''<?xml version="1.0"?>
<form string="Tire move">
<separator string="Tire movement must be to a Vehicle!" colspan="4"/>
</form>'''
form4_fields = {}
form5 = '''<?xml version="1.0"?>
<form string="Tire move">
<separator string="Tire must in a Vehicle!" colspan="4"/>
</form>'''
form5_fields = {}
form6 = '''<?xml version="1.0"?>
<form string="Tire move">
<separator string="Tire correctly moved! " colspan="4"/>
</form>'''
form6_fields = {}
def tire_init (self,cr,uid, data,context):
move_data = {}
pool = pooler.get_pool(cr.dbname)
tire_obj = pool.get('stock.production.lot')
move_obj = pool.get('stock.move')
loc_obj = pool.get('stock.location')
company_obj = pool.get('res.company')
tire = tire_obj.browse(cr,uid,data['id'])
company=tire.company_id
move_list = move_obj.search(cr,uid,[('prodlot_id','=',tire.id)])
locat_default = company.tire_stock
destini = False
if move_list == []:
origin = locat_default.id
else:
loc_id = max(move_list)
move= move_obj.browse(cr,uid, loc_id)
origin = move.location_dest_id.id
move_data={'tire':tire.id, 'origin': origin, 'destination': destini}
return move_data
def tire_interchange (self,cr,uid, data,context):
pool = pooler.get_pool(cr.dbname)
tire_obj = pool.get('stock.production.lot')
tire_data_obj = pool.get('tire.stock.lot')
move_obj = pool.get('stock.move')
vehic_obj = pool.get('fleet.vehicles')
loc_obj = pool.get('stock.location')
company_obj = pool.get('res.company')
tire = tire_obj.browse(cr,uid,data['form']['tire'])
company=tire.company_id
move_list = move_obj.search(cr,uid,[('prodlot_id','=',tire.id)])
destination = loc_obj.browse (cr,uid,data['form']['destination'])
destination_name = destination.name
origin = loc_obj.browse (cr,uid,data['form']['origin'])
origin_name = origin.name
#Comprobar si el origen es un vehiculo
loc_parent_ori = origin.location_id.id
if loc_parent_ori:
vehic_list = vehic_obj.search(cr,uid,[('buslocat','=',loc_parent_ori)])
else : vehic_list = []
if vehic_list ==[]:
ori_vehicle = False
res = 'ori_vehi'
else:
ori_vehicle = True
vehicle_ori = vehic_obj.browse(cr,uid,vehic_list[0]) # Origin Vehicle
# Termina comprobación origen
#Comprobar destino es vehiculo
if data['form']['locat'] :
dest_vehi = True
location = loc_obj.browse (cr,uid,data['form']['locat'])
vehicle_list = vehic_obj.search(cr,uid,[('buslocat','=',destination.id)])
vehicle_dest = vehic_obj.browse(cr,uid,vehicle_list[0]) # Destination Vehicle
else:
dest_vehi = False
res= 'dest_vehi'
#Termina comprobación destino
if dest_vehi and ori_vehicle : # Destination AND Origin = Vehicle
res = 'moved'
# actualizar odometro rueda
odometer = data['form']['odometer']
if move_list == []:
res = 'error'
else:
loc_id = max(move_list)
move= move_obj.browse(cr,uid, loc_id)
result = int(odometer) - move.odometer
tire_odometer = tire.tire_km + result
if tire.odometers:
odometer_text = tire.odometers + "\n" + str(data['form']['odometer'])
else: odometer_text = str(data['form']['odometer'])
tire_val= {'tire_km' : tire_odometer, 'odometers' : odometer_text}
# Termina actualización odometro rueda
#Datos movimiento
product_id = tire.product_id
move_data = {'product_id' : tire.product_id.id,
'name' : origin.name + ' | ' + tire.name + ' => ' + destination.name,
'location_id' : origin.id,
'product_uom': tire.product_id.product_tmpl_id.uom_id.id,
'prodlot_id' : tire.id,
'location_dest_id': location.id,
'odometer': odometer
}
#Datos rueda
tire_data_list=tire_data_obj.search(cr,uid,[('lot_id','=',tire.id)])
tire_data_id = max(tire_data_list)
tire_data = tire_data_obj.browse(cr,uid,tire_data_id)
tire_data_val={
'name': origin.name + ' | ' + tire.name + ' => ' + destination.name,
'lot_id': tire.id,
'origin' : origin.id,
'destination': location.id,
# 'data':time.strftime('%Y-%m-%d %H:%M:%S'),
'odomold' : tire_data.odomnew,
'odomnew' : odometer,
'tire_km' : odometer - tire_data.odomnew,
'tire_km_total':tire_data.tire_km_total + odometer - tire_data.odomnew
}
#Fin datos rueda
occupied = False
if location.name.endswith("-1"): # Tire to right
mount = {'f_l_tire' : tire.id}
if vehicle_dest.f_l_tire.id: # Tire occupied
occupied = vehicle_dest.f_l_tire
elif location.name.endswith("-2"):
mount = {'f_r_tire' : tire.id}
if vehicle_dest.f_r_tire.id: # Tire occupied
occupied = vehicle_dest.f_r_tire
if vehicle_dest.tires == 6:
if location.name.endswith("-3"):
mount = {'r_l_tire1' : tire.id}
if vehicle_dest.r_l_tire1.id:
occupied = vehicle_dest.r_l_tire1
elif location.name.endswith("-4"):
mount = {'r_l_tire2' : tire.id}
if vehicle_dest.r_l_tire2.id:
occupied = vehicle_dest.r_l_tire2
elif location.name.endswith("-5"):
mount = {'r_r_tire2' : tire.id}
if vehicle_dest.r_r_tire2.id:
occupied = vehicle_dest.r_r_tire2
elif location.name.endswith("-6"):
mount = {'r_r_tire1' : tire.id}
if vehicle_dest.r_r_tire1.id:
occupied = vehicle_dest.r_r_tire1
if vehicle_dest.tires > 6:
if location.name.endswith("-3"):
mount = {'m_l_tire1' : tire.id}
if vehicle_dest.m_l_tire1.id:
occupied = vehicle_dest.m_l_tire1
elif location.name.endswith("-4"):
mount = {'m_l_tire2' : tire.id}
if vehicle_dest.m_l_tire2.id:
occupied = vehicle_dest.m_l_tire2
elif location.name.endswith("-5"):
mount = {'m_r_tire2' : tire.id}
if vehicle_dest.m_r_tire2.id:
occupied = vehicle_dest.m_r_tire2
elif location.name.endswith("-6"):
mount = {'m_r_tire1' : tire.id}
if vehicle_dest.m_r_tire1.id:
occupied = vehicle_dest.m_r_tire1
elif location.name.endswith("-7"):
mount = {'r_l_tire1' : tire.id}
if vehicle_dest.r_l_tire1.id:
occupied = vehicle_dest.r_l_tire1
elif location.name.endswith("-8"):
mount = {'r_r_tire1' : tire.id}
if vehicle_dest.r_r_tire1.id:
occupied = vehicle_dest.r_r_tire1
if not occupied:
#Actualiza rueda
tire_obj.write(cr,uid, tire.id,tire_val)
#actualiza vehiculo destino
vehic_obj.write(cr,uid, vehicle_dest.id, mount)
#actualiza movimiento
move_id = move_obj.create(cr,uid,move_data)
#crear datos neumático
move_data_reg = move_obj.browse(cr,uid,move_id)
tire_data_val['data']= move_data_reg.date
data_id= tire_data_obj.create(cr,uid,tire_data_val)
#actualiza vehiculo origen
if origin_name.endswith("-1"):
update ={ 'f_l_tire' : False}
elif origin_name.endswith("-2"):
update ={ 'f_r_tire' : False}
if vehicle_ori.tires == 6:
if origin_name.endswith("-3"):
update ={ 'r_l_tire1' : False}
elif origin_name.endswith("-4"):
update ={ 'r_l_tire2' : False}
elif origin_name.endswith("-5"):
update ={ 'r_r_tire2' : False}
elif origin_name.endswith("-6"):
update ={ 'r_r_tire1' : False}
elif vehicle_ori.tires > 6:
if origin_name.endswith("-3"):
update ={ 'm_l_tire1' : False}
elif origin_name.endswith("-4"):
update ={ 'm_l_tire2' : False}
elif origin_name.endswith("-5"):
update ={ 'm_r_tire2' : False}
elif origin_name.endswith("-6"):
update ={ 'm_r_tire1' : False}
elif origin_name.endswith("-7"):
update ={ 'r_l_tire1' : False}
elif origin_name.endswith("-8"):
update ={ 'r_r_tire1' : False}
vehic_obj.write(cr,uid,vehicle_ori.id,update)
elif occupied:
res = 'full'
return res
states = {
'init': {
'actions': [tire_init],
'result': {'type': 'form', 'arch':form1, 'fields':form1_fields, 'state': [('end', 'Cancel','gtk-cancel'),('mount', 'Accept','gtk-ok')]}
},
'mount': {
'actions' : [],
'result': {'type': 'choice', 'next_state': tire_interchange}
},
'full' : {
'actions' : [],
'result': {'type': 'form', 'arch':form2, 'fields':form2_fields,'state': [('end', 'Accept','gtk-ok')]}
},
'ori_vehi': {
'actions' : [],
'result': {'type': 'form', 'arch':form3, 'fields':form3_fields,'state': [('end', 'Accept','gtk-cancel')]}
},
'dest_vehi': {
'actions' : [],
'result': {'type': 'form', 'arch':form4, 'fields':form4_fields,'state': [('end', 'Accept','gtk-cancel')]}
},
'error': {
'actions' : [],
'result': {'type': 'form', 'arch':form5, 'fields':form5_fields,'state': [('end', 'Accept','gtk-cancel')]}
},
'moved': {
'actions' : [],
'result': {'type': 'form', 'arch':form6, 'fields':form6_fields,'state': [('end', 'Accept','gtk-ok')]}
}
}
wizard_interchange('tire.interchange')
| agpl-3.0 | 5,135,022,820,725,371,000 | 42.827089 | 156 | 0.460577 | false |
spark8103/ops17 | tools/test_init.py | 1 | 18237 | #!/bin/env python
# coding: utf-8
from app import db
from app.models import User, Role, Department, Idc, Server, Software, Project, Module, Environment
import os
if os.path.exists('.env'):
print('Importing environment from .env...')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
def department_insert_data():
departments = {
u'管理中心': (None,''),
u'技术中心': (None, ''),
u'营销中心': (None, ''),
u'行政部': (Department.query.filter_by(name=u"管理中心").first(),''),
u'财务部': (Department.query.filter_by(name=u"管理中心").first(), ''),
u'运维部': (Department.query.filter_by(name=u"技术中心").first(), ''),
u'DBA部': (Department.query.filter_by(name=u"技术中心").first(), ''),
u'开发部': (Department.query.filter_by(name=u"技术中心").first(), ''),
u'测试部': (Department.query.filter_by(name=u"技术中心").first(), ''),
u'市场部': (Department.query.filter_by(name=u"营销中心").first(), ''),
u'活动部': (Department.query.filter_by(name=u"营销中心").first(), ''),
}
for r in departments:
department = Department.query.filter_by(name=r).first()
if department is None:
department = Department(name=r)
if isinstance(departments[r][0], int):
department.parent_id = departments[r][0]
else:
department.parent = departments[r][0]
department.description = departments[r][1]
db.session.add(department)
db.session.commit()
print "Insert department test data."
def user_insert_data():
users = {
'admin': ('[email protected]', 13465245521, Department.query.filter_by(name=u"管理中心").first(),
Role.query.filter_by(name="Administrator").first(), 'admin', True, "admin"),
'ops1': ('[email protected]', 13764110236, Department.query.filter_by(name=u"运维部").first(),
Role.query.filter_by(name="User").first(), 'ops1', False, "ops"),
'ops2': ('[email protected]', 13764110238, Department.query.filter_by(name=u"运维部").first(),
Role.query.filter_by(name="User").first(), 'ops2', False, "ops"),
'dev1': ('[email protected]', 13612451124, Department.query.filter_by(name=u"开发部").first(),
Role.query.filter_by(name="User").first(), 'dev1', False, "dev"),
'dev2': ('[email protected]', 13625412214, Department.query.filter_by(name=u"开发部").first(),
Role.query.filter_by(name="User").first(), 'dev2', False, "dev"),
'qa1': ('[email protected]', 13112453365, Department.query.filter_by(name=u"测试部").first(),
Role.query.filter_by(name="User").first(), 'qa1', False, "qa"),
'qa2': ('[email protected]', 13124556847, Department.query.filter_by(name=u"测试部").first(),
Role.query.filter_by(name="User").first(), 'qa2', False, "qa"),
'dba1': ('[email protected]', 13321542635, Department.query.filter_by(name=u"DBA部").first(),
Role.query.filter_by(name="User").first(), 'dba1', False, "dba"),
'dba2': ('[email protected]', 13214512245, Department.query.filter_by(name=u"DBA部").first(),
Role.query.filter_by(name="User").first(), 'dba2', False, "dba"),
'user1': ('[email protected]', 13412115694, Department.query.filter_by(name=u"活动部").first(),
Role.query.filter_by(name="User").first(), 'user1', False, "user"),
'user2': ('[email protected]', 13451489521, Department.query.filter_by(name=u"行政部").first(),
Role.query.filter_by(name="User").first(), 'user2', False, "user"),
'user3': ('[email protected]', 13465218952, Department.query.filter_by(name=u"营销中心").first(),
Role.query.filter_by(name="User").first(), 'user3', False, "manager"),
'user4': ('[email protected]', 13462548991, Department.query.filter_by(name=u"管理中心").first(),
Role.query.filter_by(name="User").first(), 'user4', False, "manager"),
}
for u in users:
user = User.query.filter_by(username=u).first()
if user is None:
user = User(username=u)
user.email = users[u][0]
user.mobile = users[u][1]
user.department = users[u][2]
user.role = users[u][3]
user.password = users[u][4]
user.allow_login = users[u][5]
user.type = users[u][6]
db.session.add(user)
db.session.commit()
print "Insert user test data."
def idc_insert_data():
idcs = {
u'周浦': '',
u'北京南路': '',
u'欧阳路': '',
u'万国数据中心': '',
u'Ucloud': '',
u'aliyun': '',
u'北京酒仙桥': '',
u'金华双线': '',
u'宁波三线': '',
u'无锡线路': '',
u'南京联通': '',
u'青岛联通': '',
}
for s in idcs:
idc = Idc.query.filter_by(name=s).first()
if idc is None:
idc = Idc(name=s)
idc.description = idcs[s]
db.session.add(idc)
db.session.commit()
print "Insert idc test data."
def server_insert_data():
servers = {
u'zp-prd-app-10': (
"zp-prd-app", Idc.query.filter_by(name=u"周浦").first(), "K1", '10.10.10.10', '', u'大数据', "PRD",
"server", "Online", ""),
u'zp-prd-app-11': (
"zp-prd-app", Idc.query.filter_by(name=u"周浦").first(), "K2", '10.10.10.11', '', u'大数据', "PRD",
"server", "Online", ""),
u'oyl-stg-app-101': (
"oyl-stg-app", Idc.query.filter_by(name=u"欧阳路").first(), "R11", '10.18.23.101', '', u'网站部',
"STG", "server", "Online", ""),
u'oyl-stg-app-102': (
"oyl-stg-app", Idc.query.filter_by(name=u"欧阳路").first(), "R11", '10.18.23.102', '', u'网站部',
"STG", "server", "Online", ""),
u'dev-oracle-21': (
"dev-oracle", Idc.query.filter_by(name=u"北京南路").first(), "A01", '172.16.11.21', '', u'IT部',
"DEV", "vserver", "Online", ""),
u'dev-oracle-22': (
"dev-oracle", Idc.query.filter_by(name=u"北京南路").first(), "A01", '172.16.11.22', '', u'IT据',
"DEV", "vserver", "Online", ""),
u'px-prd-app-10': (
"px-prd-app", Idc.query.filter_by(name=u"万国数据中心").first(), "K1", '10.88.10.10', '', u'大数据',
"PRD", "server", "Online", ""),
u'px-prd-app-11': (
"px-prd-app", Idc.query.filter_by(name=u"万国数据中心").first(), "K2", '10.88.10.11', '', u'大数据',
"PRD", "server", "Online", ""),
u'uc-stg-app-101': (
"uc-stg-app", Idc.query.filter_by(name=u"Ucloud").first(), "R11", '10.99.123.101', '', u'网站部',
"STG", "server", "Online", ""),
u'uc-stg-app-102': (
"uc-stg-app", Idc.query.filter_by(name=u"Ucloud").first(), "R11", '10.99.123.102', '', u'网站部',
"STG", "server", "Online", ""),
u'wx-oracle-21': (
"wx-oracle", Idc.query.filter_by(name=u"无锡线路").first(), "A01", '172.16.11.21', '', u'IT部',
"DEV", "vserver", "Online", ""),
u'wx-oracle-22': (
"wx-oracle", Idc.query.filter_by(name=u"无锡线路").first(), "A01", '172.16.11.22', '', u'IT据',
"DEV", "vserver", "Online", ""),
}
for s in servers:
server = Server.query.filter_by(name=s).first()
if server is None:
server = Server(name=s)
server.category_branch = servers[s][0]
server.idc = servers[s][1]
server.rack = servers[s][2]
server.private_ip = servers[s][3]
server.public_ip = servers[s][4]
server.category = servers[s][5]
server.env = servers[s][6]
server.type = servers[s][7]
server.status = servers[s][8]
server.description = servers[s][9]
db.session.add(server)
db.session.commit()
print "Insert server test data."
def project_insert_data():
projects = {
u'bd-blink': (Department.query.filter_by(name=u"管理中心").first(),
User.query.filter_by(username=u'user1').first(), '99999'),
u'bd-tiger': (Department.query.filter_by(name=u"管理中心").first(),
User.query.filter_by(username=u'user2').first(), '99999'),
u'bd-cmdb': (Department.query.filter_by(name=u"运维部").first(),
User.query.filter_by(username=u'ops1').first(), '999'),
u'bd-bdmp': (Department.query.filter_by(name=u"运维部").first(),
User.query.filter_by(username=u'ops2').first(), '999'),
u'bd-test': (Department.query.filter_by(name=u"开发部").first(),
User.query.filter_by(username=u'dev1').first(), '999'),
u'bd-test2': (Department.query.filter_by(name=u"开发部").first(),
User.query.filter_by(username=u'dev2').first(), '999'),
u'bd-test3': (Department.query.filter_by(name=u"开发部").first(),
User.query.filter_by(username=u'dev1').first(), '999'),
u'bd-jenkins': (Department.query.filter_by(name=u"测试部").first(),
User.query.filter_by(username=u'qa1').first(), '999'),
u'bd-qa': (Department.query.filter_by(name=u"测试部").first(),
User.query.filter_by(username=u'qa2').first(), '999'),
u'bd-oracle': (Department.query.filter_by(name=u"DBA部").first(),
User.query.filter_by(username=u'dba1').first(), '999'),
u'bd-mongodb': (Department.query.filter_by(name=u"DBA部").first(),
User.query.filter_by(username=u'dba2').first(), '999'),
}
for s in projects:
project = Project.query.filter_by(name=s).first()
if project is None:
project = Project(name=s)
project.department = projects[s][0]
project.pm = projects[s][1]
project.sla = projects[s][2]
db.session.add(project)
db.session.commit()
print "Insert project test data."
def module_insert_data():
modules = {
u'bd-blink-server': (Project.query.filter_by(name=u"bd-blink").first(), 'http://10.10.10.5/svn/bd-blink/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops1').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-tiger-web': (Project.query.filter_by(name=u"bd-tiger").first(), 'http://10.10.10.5/svn/bd-tiger/',
User.query.filter_by(username=u'dev2').first(), User.query.filter_by(username=u'qa2').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-cmdb': (Project.query.filter_by(name=u"bd-cmdb").first(), 'http://10.10.10.5/svn/bd-cmdb/',
User.query.filter_by(username=u'dev2').first(), User.query.filter_by(username=u'qa2').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-bdmp': (Project.query.filter_by(name=u"bd-bdmp").first(), 'http://10.10.10.5/svn/bd-bdmp/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-test': (Project.query.filter_by(name=u"bd-test").first(), 'http://10.10.10.5/svn/bd-test/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-test2': (Project.query.filter_by(name=u"bd-test2").first(), 'http://10.10.10.5/svn/bd-test2/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-test3': (Project.query.filter_by(name=u"bd-test3").first(), 'http://10.10.10.5/svn/bd-test3/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-jenkins': (Project.query.filter_by(name=u"bd-jenkins").first(), 'http://10.10.10.5/svn/bd-jenkins/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-qa': (Project.query.filter_by(name=u"bd-qa").first(), 'http://10.10.10.5/svn/bd-qa/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-oracle': (Project.query.filter_by(name=u"bd-oracle").first(), 'http://10.10.10.5/svn/bd-oracle/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-mongodb': (Project.query.filter_by(name=u"bd-mongodb").first(), 'http://10.10.10.5/svn/bd-mongodb/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
}
for m in modules:
module = Module.query.filter_by(name=m).first()
if module is None:
module = Module(name=m)
module.project = modules[m][0]
module.svn = modules[m][1]
module.dev = modules[m][2]
module.qa = modules[m][3]
module.ops = modules[m][4]
module.software = modules[m][5]
db.session.add(module)
db.session.commit()
print "Insert module test data."
def environment_insert_data():
environments = {
u'bd-blink-server': (Module.query.filter_by(name=u"bd-blink-server").first(), 'PRD',
Idc.query.filter_by(name=u'周浦').first(), "http://www.blink.com/status",
"/opt/app/bd-blink-server/", "www.blink.com"),
u'bd-tiger-web': (Module.query.filter_by(name=u"bd-tiger-web").first(), 'PRD',
Idc.query.filter_by(name=u'周浦').first(), "http://www.tiger.com/status",
"/opt/app/bd-tiger-web/", "www.tiger.com"),
u'bd-cmdb': (Module.query.filter_by(name=u"bd-cmdb").first(), 'PRD',
Idc.query.filter_by(name=u'周浦').first(), "http://www.cmdb.com/status",
"/opt/app/bd-cmdb/", "www.cmdb.com"),
u'bd-bdmp': (Module.query.filter_by(name=u"bd-bdmp").first(), 'PRD',
Idc.query.filter_by(name=u'周浦').first(), "http://www.bdmp.com/status",
"/opt/app/bd-bdmp/", "www.bdmp.com"),
u'bd-test': (Module.query.filter_by(name=u"bd-test").first(), 'DEV',
Idc.query.filter_by(name=u'周浦').first(), "http://www.test.com/status",
"/opt/app/bd-test/", "www.test.com"),
u'bd-test2': (Module.query.filter_by(name=u"bd-test2").first(), 'DEV',
Idc.query.filter_by(name=u'周浦').first(), "http://www.test2.com/status",
"/opt/app/bd-test2/", "www.test2.com"),
u'bd-test3': (Module.query.filter_by(name=u"bd-test3").first(), 'DEV',
Idc.query.filter_by(name=u'周浦').first(), "http://www.test3.com/status",
"/opt/app/bd-test3/", "www.test3.com"),
u'bd-jenkins': (Module.query.filter_by(name=u"bd-jenkins").first(), 'QA',
Idc.query.filter_by(name=u'周浦').first(), "http://www.jenkins.com/status",
"/opt/app/bd-jenkins/", "www.jenkins.com"),
u'bd-qa': (Module.query.filter_by(name=u"bd-qa").first(), 'QA',
Idc.query.filter_by(name=u'周浦').first(), "http://www.qa.com/status",
"/opt/app/bd-qa/", "www.qa.com"),
u'bd-oracle': (Module.query.filter_by(name=u"bd-oracle").first(), 'STG',
Idc.query.filter_by(name=u'周浦').first(), "http://www.oracle.com/status",
"/opt/app/bd-oracle/", "www.oracle.com"),
u'bd-mongodb': (Module.query.filter_by(name=u"bd-mongodb").first(), 'STG',
Idc.query.filter_by(name=u'周浦').first(), "http://www.mongodb.com/status",
"/opt/app/bd-mongodb/", "www.mongodb.com"),
}
for e in environments:
environment = Environment(
module=environments[e][0],
env=environments[e][1],
idc=environments[e][2],
check_point1=environments[e][3],
deploy_path=environments[e][4],
domain=environments[e][5])
db.session.add(environment)
db.session.commit()
print "Insert environment test data."
| mit | -3,189,856,236,642,584,000 | 46.867568 | 123 | 0.54085 | false |
lexifdev/pyconkr-2015 | pyconkr/views.py | 1 | 12386 | # -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.contrib.auth import login as user_login, logout as user_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import render, redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView, DetailView, UpdateView
from datetime import datetime, timedelta
from uuid import uuid4
from .forms import EmailLoginForm, SpeakerForm, ProgramForm, RegistrationForm
from .helper import sendEmailToken, render_json, send_email_ticket_confirm, render_io_error
from .models import (Room,
Program, ProgramDate, ProgramTime, ProgramCategory,
Speaker, Sponsor, Jobfair, Announcement,
EmailToken, Registration, Product)
from iamporter import get_access_token, Iamporter, IamporterError
logger = logging.getLogger(__name__)
payment_logger = logging.getLogger('payment')
def index(request):
return render(request, 'index.html', {
'base_content': FlatPage.objects.get(url='/index/').content,
'recent_announcements': Announcement.objects.all()[:3],
})
def schedule(request):
dates = ProgramDate.objects.all()
times = ProgramTime.objects.all()
rooms = Room.objects.all()
wide = {}
narrow = {}
processed = set()
for d in dates:
wide[d] = {}
narrow[d] = {}
for t in times:
wide[d][t] = {}
narrow[d][t] = {}
for r in rooms:
s = Program.objects.filter(date=d, times=t, rooms=r)
if s:
if s[0].times.all()[0] == t and s[0].id not in processed:
wide[d][t][r] = s[0]
narrow[d][t][r] = s[0]
processed.add(s[0].id)
else:
wide[d][t][r] = None
if len(narrow[d][t]) == 0:
del(narrow[d][t])
contexts = {
'wide': wide,
'narrow': narrow,
'rooms': rooms,
}
return render(request, 'schedule.html', contexts)
class RoomDetail(DetailView):
model = Room
class SponsorList(ListView):
model = Sponsor
class SponsorDetail(DetailView):
model = Sponsor
class SpeakerList(ListView):
model = Speaker
class SpeakerDetail(DetailView):
model = Speaker
def get_context_data(self, **kwargs):
context = super(SpeakerDetail, self).get_context_data(**kwargs)
if self.request.user.is_authenticated():
if self.request.user.email == self.object.email:
context['editable'] = True
return context
class SpeakerUpdate(UpdateView):
model = Speaker
form_class = SpeakerForm
def get_queryset(self):
queryset = super(SpeakerUpdate, self).get_queryset()
return queryset.filter(email=self.request.user.email)
class ProgramList(ListView):
model = ProgramCategory
template_name = 'pyconkr/program_list.html'
class ProgramDetail(DetailView):
model = Program
def get_context_data(self, **kwargs):
context = super(ProgramDetail, self).get_context_data(**kwargs)
if self.request.user.is_authenticated():
for speaker in self.object.speakers.all():
if self.request.user.email == speaker.email:
context['editable'] = True
return context
class ProgramUpdate(UpdateView):
model = Program
form_class = ProgramForm
def get_queryset(self):
queryset = super(ProgramUpdate, self).get_queryset()
return queryset.filter(speakers__email=self.request.user.email)
class JobfairList(ListView):
model = Jobfair
class AnnouncementList(ListView):
model = Announcement
def get_queryset(self):
now = datetime.now()
queryset = super(AnnouncementList, self).get_queryset()
return queryset.filter(Q(announce_after__isnull=True) | Q(announce_after__lt=now))
class AnnouncementDetail(DetailView):
model = Announcement
def robots(request):
return render(request, 'robots.txt', content_type='text/plain')
def login(request):
form = EmailLoginForm()
if request.method == 'POST':
form = EmailLoginForm(request.POST)
if form.is_valid():
# Remove previous tokens
email = form.cleaned_data['email']
EmailToken.objects.filter(email=email).delete()
# Create new
token = EmailToken(email=email)
token.save()
sendEmailToken(request, token)
return redirect(reverse('login_mailsent'))
return render(request, 'login.html', {
'form': form,
'title': _('Login'),
})
@never_cache
def login_req(request, token):
time_threshold = datetime.now() - timedelta(hours=1)
try:
token = EmailToken.objects.get(token=token, created__gte=time_threshold)
except ObjectDoesNotExist:
return render(request, 'login_notvalidtoken.html', {
'title': _('Not valid token')
})
email = token.email
# Create user automatically by email as id, token as password
try:
user = User.objects.get(email=email)
except ObjectDoesNotExist:
user = User.objects.create_user(email, email, token)
user.save()
token.delete()
# Set backend manually
user.backend = 'django.contrib.auth.backends.ModelBackend'
user_login(request, user)
return redirect(reverse('index'))
@never_cache
def login_mailsent(request):
return render(request, 'login_mailsent.html', {
'title': _('Mail sent'),
})
def logout(request):
user_logout(request)
return redirect(reverse('index'))
@login_required
def profile(request):
return render(request, 'profile.html')
@login_required
def registration_info(request):
is_ticket_open = is_registration_time()
return render(request, 'pyconkr/registration/info.html', {
"is_ticket_open" : is_ticket_open
})
@login_required
def registration_status(request):
try:
registration = Registration.objects.filter(user=request.user).get()
except Registration.DoesNotExist:
registration = None
return render(request, 'pyconkr/registration/status.html', {
'title': _('Registration'),
'registration': registration,
})
@login_required
def registration_payment(request):
if not is_registration_time():
return redirect('registration_info')
if request.method == 'GET':
product = Product()
registered = Registration.objects.filter(
user=request.user,
payment_status__in=['paid', 'ready']
).exists()
if registered:
return redirect('registration_status')
uid = str(uuid4()).replace('-', '')
form = RegistrationForm(initial={'email': request.user.email})
return render(request, 'pyconkr/registration/payment.html', {
'title': _('Registration'),
'IMP_USER_CODE': settings.IMP_USER_CODE, # TODO : Move to 'settings context processor'
'form': form,
'uid': uid,
'product_name': product.name,
'amount': product.price,
'vat': 0,
})
elif request.method == 'POST':
payment_logger.debug(request.POST)
form = RegistrationForm(request.POST)
# TODO : more form validation
# eg) merchant_uid
if not form.is_valid():
form_errors_string = '\n'.join(('%s:%s' % (k, v[0]) for k, v in form.errors.items()))
return render_json({
'success': False,
'message': form_errors_string, # TODO : ...
})
remain_ticket_count = (settings.MAX_TICKET_NUM - Registration.objects.filter(payment_status__in=['paid', 'ready']).count())
# sold out
if remain_ticket_count <= 0:
return render_json({
'success': False,
'message': u'티켓이 매진 되었습니다',
})
registration, created = Registration.objects.get_or_create(user=request.user)
registration.name = form.cleaned_data.get('name')
registration.email = request.user.email
registration.company = form.cleaned_data.get('company', '')
registration.phone_number = form.cleaned_data.get('phone_number', '')
registration.merchant_uid = request.POST.get('merchant_uid')
registration.save() # TODO : use form.save()
try:
product = Product()
access_token = get_access_token(settings.IMP_API_KEY, settings.IMP_API_SECRET)
imp_client = Iamporter(access_token)
if request.POST.get('payment_method') == 'card':
# TODO : use validated and cleaned data
imp_client.onetime(
token=request.POST.get('token'),
merchant_uid=request.POST.get('merchant_uid'),
amount=request.POST.get('amount'),
# vat=request.POST.get('vat'),
card_number=request.POST.get('card_number'),
expiry=request.POST.get('expiry'),
birth=request.POST.get('birth'),
pwd_2digit=request.POST.get('pwd_2digit'),
customer_uid=form.cleaned_data.get('email'),
)
confirm = imp_client.find_by_merchant_uid(request.POST.get('merchant_uid'))
if confirm['amount'] != product.price:
# TODO : cancel
return render_io_error("amount is not same as product.price. it will be canceled")
registration.payment_method = confirm.get('pay_method')
registration.payment_status = confirm.get('status')
registration.payment_message = confirm.get('fail_reason')
registration.vbank_name = confirm.get('vbank_name', None)
registration.vbank_num = confirm.get('vbank_num', None)
registration.vbank_date = confirm.get('vbank_date', None)
registration.vbank_holder = confirm.get('vbank_holder', None)
registration.save()
send_email_ticket_confirm(request, registration)
except IamporterError as e:
# TODO : other status code
return render_json({
'success': False,
'code': e.code,
'message': e.message,
})
else:
return render_json({
'success': True,
})
@csrf_exempt
def registration_payment_callback(request):
merchant_uid = request.POST.get('merchant_uid', None)
if not merchant_uid:
return render_io_error('merchant uid dose not exist')
product = Product()
# TODO : check stock
access_token = get_access_token(settings.IMP_API_KEY, settings.IMP_API_SECRET)
imp_client = Iamporter(access_token)
confirm = imp_client.find_by_merchant_uid(merchant_uid)
if confirm['amount'] != product.price:
# TODO : cancel
return render_io_error('amount is not product.price')
remain_ticket_count = (settings.MAX_TICKET_NUM - Registration.objects.filter(payment_status='paid').count())
if remain_ticket_count <= 0:
# TODO : cancel
return render_json({
'success': False,
'message': u'티켓이 매진 되었습니다'
})
registration = Registration.objects.filter(merchant_uid=merchant_uid).get()
registration.payment_status = 'paid'
registration.save()
send_email_ticket_confirm(request, registration)
return render_json({
'success': True
})
def is_registration_time():
ticket_open_date = datetime.strptime(settings.TICKET_OPEN_DATETIME, '%Y-%m-%d %H:%M:%S')
ticket_close_date = datetime.strptime(settings.TICKET_CLOSE_DATETIME, '%Y-%m-%d %H:%M:%S')
cur = datetime.now()
return ticket_open_date <= cur <= ticket_close_date
| mit | -7,112,165,041,920,347,000 | 30.098237 | 131 | 0.611291 | false |
kmee/pycorreios | test/test_correios.py | 1 | 1853 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
test_root = os.path.dirname(os.path.abspath(__file__))
os.chdir(test_root)
sys.path.insert(0, os.path.dirname(test_root))
sys.path.insert(0, test_root)
import unittest
from pycorreios.correios import Correios
from pycorreios.model import Cep, Frete, Encomenda
class CorreiosTests(unittest.TestCase):
def testFrete(self):
valor_esperado = {'MsgErro': '',
'PrazoEntrega': u'1',
'Erro': u'0',
'ValorValorDeclarado': u'0,00',
'EntregaDomiciliar': u'S',
'ValorMaoPropria': u'0,00',
'EntregaSabado': u'S',
'Valor': u'151,40',
'Codigo': u'40010'
}
valor = Correios().frete(Correios().SEDEX,'44001535',
'03971010',10,18,8)
assert valor == valor_esperado
def testCep(self):
valor_esperado = {'tipo_logradouro': u'Rua',
'bairro': u'Jardim Santa Adelia',
'cidade': u'S\xe3o Paulo',
'uf': u'SP',
'logradouro': u'Pascoal Dias'
}
valor = Correios().cep('03971010')
assert valor == valor_esperado
def testEncomenda(self):
valor_esperado = Encomenda(data='30/11/2010 18:49', local='CEE BRAS - SAO PAULO/SP', status='Entregue')
valor = Correios().encomenda('SW238151411BR')[0]
assert valor.data == valor_esperado.data
assert valor.local == valor_esperado.local
assert valor.status == valor_esperado.status
def main():
unittest.main()
if __name__ == '__main__':
main()
| mit | 7,613,583,466,708,231,000 | 33.962264 | 111 | 0.501349 | false |
gabisurita/kinto-codegen-tutorial | python-client/swagger_client/apis/utilities_api.py | 1 | 21953 | # coding: utf-8
"""
kinto
Kinto is a minimalist JSON storage service with synchronisation and sharing abilities. It is meant to be easy to use and easy to self-host. **Limitations of this OpenAPI specification:** 1. Validation on OR clauses is not supported (e.g. provide `data` or `permissions` in patch operations). 2. [Filtering](http://kinto.readthedocs.io/en/stable/api/1.x/filtering.html) is supported on any field by using `?{prefix}{field_name}={value}`. 3. [Backoff headers](http://kinto.readthedocs.io/en/stable/api/1.x/backoff.html) may occur with any response, but they are only present if the server is under in heavy load, so we cannot validate them on every request. They are listed only on the default error message. 4. [Collection schemas](http://kinto.readthedocs.io/en/stable/api/1.x/collections.html#collection-json-schema) can be provided when defining a collection, but they are not validated by this specification.
OpenAPI spec version: 1.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class UtilitiesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
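    # Usage sketch (illustrative, not part of the generated API): the host
    # below is a hypothetical value and must point at a real Kinto server
    # reachable through the ``Configuration`` singleton.
    #
    #   from swagger_client import ApiClient, Configuration
    #   from swagger_client.apis.utilities_api import UtilitiesApi
    #
    #   Configuration().host = "https://kinto.example.com/v1"
    #   api = UtilitiesApi(ApiClient())
    #   print(api.server_info())    # GET / -> metadata about the instance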
def contribute(self, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.contribute(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.contribute_with_http_info(**kwargs)
else:
(data) = self.contribute_with_http_info(**kwargs)
return data
def contribute_with_http_info(self, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.contribute_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method contribute" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/contribute.json'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
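    # Asynchronous usage sketch for the ``callback`` pattern documented in
    # the docstrings above (illustrative): with a callback the client runs
    # the request on a worker thread and returns that thread immediately.
    #
    #   def on_contribute(response):
    #       print(response)         # deserialized JSON body
    #
    #   thread = api.contribute(callback=on_contribute)
    #   thread.join()               # wait for the request to finish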
def heartbeat(self, **kwargs):
"""
Return the status of dependent services.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.heartbeat(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: dict(str, bool)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.heartbeat_with_http_info(**kwargs)
else:
(data) = self.heartbeat_with_http_info(**kwargs)
return data
def heartbeat_with_http_info(self, **kwargs):
"""
Return the status of dependent services.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.heartbeat_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: dict(str, bool)
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method heartbeat" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/__heartbeat__'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, bool)',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
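    # Interpreting the heartbeat payload (sketch): ``heartbeat`` returns a
    # dict(str, bool) mapping backend names to their status, so overall
    # health is the conjunction of all values. The backend names shown are
    # assumptions about a typical Kinto deployment.
    #
    #   checks = api.heartbeat()    # e.g. {"cache": True, "storage": True}
    #   if not all(checks.values()):
    #       failing = [name for name, ok in checks.items() if not ok]
    #       print("unhealthy backends:", failing)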
def lbheartbeat(self, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.lbheartbeat(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.lbheartbeat_with_http_info(**kwargs)
else:
(data) = self.lbheartbeat_with_http_info(**kwargs)
return data
def lbheartbeat_with_http_info(self, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.lbheartbeat_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method lbheartbeat" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/__lbheartbeat__'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)

    def server_info(self, **kwargs):
"""
Information about the running instance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.server_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.server_info_with_http_info(**kwargs)
else:
(data) = self.server_info_with_http_info(**kwargs)
return data

    def server_info_with_http_info(self, **kwargs):
"""
Information about the running instance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.server_info_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method server_info" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)

    def version(self, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.version(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.version_with_http_info(**kwargs)
else:
(data) = self.version_with_http_info(**kwargs)
return data

    def version_with_http_info(self, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.version_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method version" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/__version__'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
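
# Usage sketch (illustrative only; `DefaultApi` and `ApiClient` are assumed
# names for the generated client classes, which are not shown in this file):
#
#     from pprint import pprint
#     api = DefaultApi(ApiClient())
#     pprint(api.heartbeat())                 # synchronous: dict(str, bool)
#     thread = api.version(callback=pprint)   # asynchronous: returns the thread
#     thread.join()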
| mit | -6,518,620,422,209,996,000 | 37.046794 | 938 | 0.52453 | false |
mgeorgehansen/FIFE_Technomage | engine/python/fife/extensions/pychan/widgets/scrollarea.py | 1 | 4667 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2009 by the FIFE team
# http://www.fifengine.de
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from common import *
from widget import Widget


class ScrollArea(Widget):
"""
A wrapper around another (content) widget.
New Attributes
==============
- content: The wrapped widget.
- vertical_scrollbar: Boolean: Set this to False to hide the Vertical scrollbar
- horizontal_scrollbar: Boolean: Set this to False to hide the Horizontal scrollbar
"""
ATTRIBUTES = Widget.ATTRIBUTES + [ BoolAttr("vertical_scrollbar"),BoolAttr("horizontal_scrollbar") ]
DEFAULT_HEXPAND = 1
DEFAULT_VEXPAND = 1
def __init__(self,**kwargs):
self.real_widget = fife.ScrollArea()
self._content = None
super(ScrollArea,self).__init__(**kwargs)
def addChild(self,widget):
self.content = widget
widget.parent = self
def removeChild(self,widget):
if self._content != widget:
raise RuntimeError("%s does not have %s as direct child widget." % (str(self),str(widget)))
self.content = None
widget.parent = None
def _setContent(self,content):
if content is None:
self.real_widget.setContent(content)
else:
self.real_widget.setContent(content.real_widget)
self._content = content
def _getContent(self): return self._content
content = property(_getContent,_setContent)
def deepApply(self,visitorFunc, leaves_first = True):
if leaves_first:
if self._content: self._content.deepApply(visitorFunc, leaves_first = leaves_first)
visitorFunc(self)
if not leaves_first:
if self._content: self._content.deepApply(visitorFunc, leaves_first = leaves_first)
def resizeToContent(self,recurse=True):
if self._content is None: return
if recurse:
self.content.resizeToContent(recurse=recurse)
self.size = self.min_size
def _visibilityToScrollPolicy(self,visibility):
if visibility:
return fife.ScrollArea.SHOW_AUTO
return fife.ScrollArea.SHOW_NEVER
def _scrollPolicyToVisibility(self,policy):
if policy == fife.ScrollArea.SHOW_NEVER:
return False
return True
def _setHorizontalScrollbar(self,visibility):
self.real_widget.setHorizontalScrollPolicy( self._visibilityToScrollPolicy(visibility) )
def _setVerticalScrollbar(self,visibility):
self.real_widget.setVerticalScrollPolicy( self._visibilityToScrollPolicy(visibility) )
def _getHorizontalScrollbar(self):
return self._scrollPolicyToVisibility( self.real_widget.getHorizontalScrollPolicy() )
def _getVerticalScrollbar(self):
return self._scrollPolicyToVisibility( self.real_widget.getVerticalScrollPolicy() )
def sizeChanged(self):
if self.content:
self.content.width = max(self.content.width,self.width-5)
self.content.height = max(self.content.height,self.height-5)
def getVerticalMaxScroll(self):
return self.real_widget.getVerticalMaxScroll()
def getHorizontalMaxScroll(self):
return self.real_widget.getHorizontalMaxScroll()
def _getHorizontalScrollAmount(self):
return self.real_widget.getHorizontalScrollAmount()
def _setHorizontalScrollAmount(self, scroll_amount):
return self.real_widget.setHorizontalScrollAmount(scroll_amount)
def _getVerticalScrollAmount(self):
return self.real_widget.getVerticalScrollAmount()
def _setVerticalScrollAmount(self, scroll_amount):
return self.real_widget.setVerticalScrollAmount(scroll_amount)
vertical_scrollbar = property(_getVerticalScrollbar,_setVerticalScrollbar)
horizontal_scrollbar = property(_getHorizontalScrollbar,_setHorizontalScrollbar)
horizontal_scroll_amount = property(_getHorizontalScrollAmount, _setHorizontalScrollAmount)
vertical_scroll_amount = property(_getVerticalScrollAmount, _setVerticalScrollAmount)
| lgpl-2.1 | -4,644,638,224,177,974,000 | 35.336 | 101 | 0.71952 | false |
leliel12/scikit-criteria | doc/source/conf.py | 1 | 6770 | # -*- coding: utf-8 -*-
#
# Scikit-Criteria documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 3 02:18:36 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# to retrieve scikit criteria metadata
os.environ["SKCRITERIA_IN_SETUP"] = "True"
import skcriteria
# modules to mock in readthedocs
MOCK_MODULES = []
#~ ["numpy", "scipy",
#~ "matplotlib", "matplotlib.pyplot",
#~ "matplotlib.cm", "matplotlib.patches",
#~ "matplotlib.spines", "matplotlib.projections.polar",
#~ "matplotlib.projections", "matplotlib.path"]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
'nbsphinx']
numpydoc_class_members_toctree = False
nbsphinx_execute = 'always'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = skcriteria.NAME
copyright = u'2015-2016-2017-2018, Juan B. Cabral - Nadia A. Luczywo'
author = u'Juan BC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = skcriteria.VERSION
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
if on_rtd:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
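    # With these stubs registered, importing any name listed in MOCK_MODULES
    # (currently empty; candidates are kept in the comment above) succeeds on
    # Read the Docs, and arbitrary attribute access returns another Mock, so
    # autodoc can import modules whose compiled dependencies are not installed.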
else: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_favicon = "_static/favicon.ico"
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Scikit-Criteriadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Scikit-Criteria.tex', u'Scikit-Criteria Documentation',
u'Juan BC', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scikit-criteria', u'Scikit-Criteria Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Scikit-Criteria', u'Scikit-Criteria Documentation',
author, 'Scikit-Criteria', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def setup(app):
app.add_stylesheet('css/skcriteria.css')
app.add_javascript('js/skcriteria.js')
| bsd-3-clause | 4,692,782,530,310,709,000 | 30.055046 | 79 | 0.67223 | false |
andela/troupon | troupon/middleware/general.py | 1 | 1525 |
from django.conf import settings
from deals.models import Category, Advertiser, NIGERIAN_LOCATIONS, KENYAN_LOCATIONS, ALL_LOCATIONS


class SharedContextMiddleware(object):
"""
    Middleware class that injects common data needed when rendering
    most view responses into the context of the TemplateResponse
    objects returned by the views.
The injected data includes:
cities: includes a paginated listing of all state
choices/cities available on the site.
categories : includes a paginated listing of all categories
available on the site.
advertisers: includes a paginated listing of all advertisers
available on the site.
search_options : includes any current search query and the
default state choice for searches.
"""
def process_template_response(self, request, response):
"""
Middleware hook method called immediately after the
view function returns a response.
"""
response.context_data.update({
'cities': ALL_LOCATIONS,
'categories': Category.objects.all(),
'advertisers': Advertiser.objects.all(),
'search_options': {
'query': request.GET.get('q', ''),
'default_city': settings.DEALS.get('default_search_city', 25),
},
# until newsletter subscription is implemented
'show_newsletter_section': False,
})
return response
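
# Usage sketch (illustrative; the settings module is not shown here, so the
# exact variable name is an assumption for this Django version):
#
#     MIDDLEWARE_CLASSES = (
#         # ...
#         'troupon.middleware.general.SharedContextMiddleware',
#     )
#
# The process_template_response hook only fires for views that return
# TemplateResponse objects; plain HttpResponse objects skip this phase.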
| mit | 7,258,990,352,408,417,000 | 37.125 | 98 | 0.637377 | false |
polera/rblwatch | rblwatch/rblwatch.py | 1 | 6297 | #!/usr/bin/env python
import sys
import socket
import re
from IPy import IP
from dns.resolver import Resolver, NXDOMAIN, NoNameservers, Timeout, NoAnswer
from threading import Thread


RBLS = [
'aspews.ext.sorbs.net',
'b.barracudacentral.org',
'bl.deadbeef.com',
'bl.emailbasura.org',
'bl.spamcannibal.org',
'bl.spamcop.net',
'blackholes.five-ten-sg.com',
'blacklist.woody.ch',
'bogons.cymru.com',
'cbl.abuseat.org',
'cdl.anti-spam.org.cn',
'combined.abuse.ch',
'combined.rbl.msrbl.net',
'db.wpbl.info',
'dnsbl-1.uceprotect.net',
'dnsbl-2.uceprotect.net',
'dnsbl-3.uceprotect.net',
'dnsbl.cyberlogic.net',
'dnsbl.dronebl.org',
'dnsbl.inps.de',
'dnsbl.njabl.org',
'dnsbl.sorbs.net',
'drone.abuse.ch',
'duinv.aupads.org',
'dul.dnsbl.sorbs.net',
'dul.ru',
'dyna.spamrats.com',
'dynip.rothen.com',
    'http.dnsbl.sorbs.net',
'images.rbl.msrbl.net',
'ips.backscatterer.org',
'ix.dnsbl.manitu.net',
'korea.services.net',
'misc.dnsbl.sorbs.net',
'noptr.spamrats.com',
'ohps.dnsbl.net.au',
'omrs.dnsbl.net.au',
'orvedb.aupads.org',
'osps.dnsbl.net.au',
'osrs.dnsbl.net.au',
'owfs.dnsbl.net.au',
    'owps.dnsbl.net.au',
'pbl.spamhaus.org',
'phishing.rbl.msrbl.net',
    'probes.dnsbl.net.au',
'proxy.bl.gweep.ca',
'proxy.block.transip.nl',
'psbl.surriel.com',
'rdts.dnsbl.net.au',
'relays.bl.gweep.ca',
'relays.bl.kundenserver.de',
'relays.nether.net',
'residential.block.transip.nl',
'ricn.dnsbl.net.au',
'rmst.dnsbl.net.au',
'sbl.spamhaus.org',
'short.rbl.jp',
'smtp.dnsbl.sorbs.net',
'socks.dnsbl.sorbs.net',
'spam.abuse.ch',
'spam.dnsbl.sorbs.net',
'spam.rbl.msrbl.net',
'spam.spamrats.com',
'spamlist.or.kr',
'spamrbl.imp.ch',
't3direct.dnsbl.net.au',
'tor.dnsbl.sectoor.de',
'torserver.tor.dnsbl.sectoor.de',
'ubl.lashback.com',
'ubl.unsubscore.com',
'virbl.bit.nl',
'virus.rbl.jp',
'virus.rbl.msrbl.net',
'web.dnsbl.sorbs.net',
'wormrbl.imp.ch',
'xbl.spamhaus.org',
'zen.spamhaus.org',
'zombie.dnsbl.sorbs.net',
]


class Lookup(Thread):
def __init__(self, host, dnslist, listed, resolver):
Thread.__init__(self)
self.host = host
self.listed = listed
self.dnslist = dnslist
self.resolver = resolver

    def run(self):
try:
host_record = self.resolver.query(self.host, "A")
if len(host_record) > 0:
self.listed[self.dnslist]['LISTED'] = True
self.listed[self.dnslist]['HOST'] = host_record[0].address
text_record = self.resolver.query(self.host, "TXT")
if len(text_record) > 0:
self.listed[self.dnslist]['TEXT'] = "\n".join(text_record[0].strings)
self.listed[self.dnslist]['ERROR'] = False
except NXDOMAIN:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = NXDOMAIN
except NoNameservers:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = NoNameservers
except Timeout:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = Timeout
except NameError:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = NameError
except NoAnswer:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = NoAnswer


class RBLSearch(object):
def __init__(self, lookup_host):
self.lookup_host = lookup_host
self._listed = None
self.resolver = Resolver()
self.resolver.timeout = 0.2
self.resolver.lifetime = 1.0

    def search(self):
if self._listed is not None:
pass
else:
ip = IP(self.lookup_host)
host = ip.reverseName()
if ip.version() == 4:
host = re.sub('.in-addr.arpa.', '', host)
elif ip.version() == 6:
host = re.sub('.ip6.arpa.', '', host)
self._listed = {'SEARCH_HOST': self.lookup_host}
threads = []
for LIST in RBLS:
self._listed[LIST] = {'LISTED': False}
query = Lookup("%s.%s" % (host, LIST), LIST, self._listed, self.resolver)
threads.append(query)
query.start()
for thread in threads:
thread.join()
return self._listed
listed = property(search)

    def print_results(self):
listed = self.listed
print("")
print("--- DNSBL Report for %s ---" % listed['SEARCH_HOST'])
for key in listed:
if key == 'SEARCH_HOST':
continue
if not listed[key].get('ERROR'):
if listed[key]['LISTED']:
print("Results for %s: %s" % (key, listed[key]['LISTED']))
print(" + Host information: %s" % \
(listed[key]['HOST']))
if 'TEXT' in listed[key].keys():
print(" + Additional information: %s" % \
(listed[key]['TEXT']))
else:
#print "*** Error contacting %s ***" % key
pass
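
# Programmatic usage sketch (illustrative; 127.0.0.2 is the conventional
# DNSBL test address that most blacklists intentionally list):
#
#     searcher = RBLSearch('127.0.0.2')
#     results = searcher.listed   # runs the threaded lookups once, then caches
#     hits = [rbl for rbl, info in results.items()
#             if rbl != 'SEARCH_HOST' and info.get('LISTED')]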
if __name__ == "__main__":
# Tests!
try:
if len(sys.argv) > 1:
print("Looking up: %s (please wait)" % sys.argv[1])
ip = sys.argv[1]
            pat = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")  # escape dots to match literal separators
is_ip_address = pat.match(ip)
if not is_ip_address:
try:
ip = socket.gethostbyname(ip)
print("Hostname %s resolved to ip %s" % (sys.argv[1],ip))
except socket.error:
print("IP %s can't be resolved" % ip)
ip = ""
if ip:
searcher = RBLSearch(ip)
searcher.print_results()
else:
print("""Usage summary:
rblwatch <ip address to lookup> """)
except KeyboardInterrupt:
pass
| bsd-2-clause | -1,166,850,851,907,662,000 | 30.80303 | 89 | 0.532476 | false |
anchore/anchore-engine | tests/unit/anchore_engine/services/policy_engine/engine/policy/gates/test_vulnerabilities.py | 1 | 44126 | import datetime
import json
import os
import re
from unittest.mock import Mock
import pytest
from anchore_engine.common.models.policy_engine import ImageVulnerabilitiesReport
from anchore_engine.db import Image
from anchore_engine.db.entities.policy_engine import (
DistroMapping,
FeedGroupMetadata,
FeedMetadata,
)
from anchore_engine.services.policy_engine import init_feed_registry
from anchore_engine.services.policy_engine.engine.policy.gate import ExecutionContext
from anchore_engine.services.policy_engine.engine.policy.gates.vulnerabilities import (
FeedOutOfDateTrigger,
UnsupportedDistroTrigger,
VulnerabilitiesGate,
VulnerabilityBlacklistTrigger,
VulnerabilityMatchTrigger,
)
from anchore_engine.services.policy_engine.engine.vulns.providers import (
GrypeProvider,
LegacyProvider,
)


DISTRO_MAPPINGS = [
DistroMapping(from_distro="alpine", to_distro="alpine", flavor="ALPINE"),
DistroMapping(from_distro="busybox", to_distro="busybox", flavor="BUSYB"),
DistroMapping(from_distro="centos", to_distro="rhel", flavor="RHEL"),
DistroMapping(from_distro="debian", to_distro="debian", flavor="DEB"),
DistroMapping(from_distro="fedora", to_distro="rhel", flavor="RHEL"),
DistroMapping(from_distro="ol", to_distro="ol", flavor="RHEL"),
DistroMapping(from_distro="rhel", to_distro="rhel", flavor="RHEL"),
DistroMapping(from_distro="ubuntu", to_distro="ubuntu", flavor="DEB"),
DistroMapping(from_distro="amzn", to_distro="amzn", flavor="RHEL"),
DistroMapping(from_distro="redhat", to_distro="rhel", flavor="RHEL"),
]
MAPPINGS_MAP = {mapping.from_distro: mapping for mapping in DISTRO_MAPPINGS}


@pytest.fixture
def set_provider(monkeypatch):
def _set_provider(provider_name=None):
provider = LegacyProvider
if provider_name == "grype":
provider = GrypeProvider
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.policy.gates.vulnerabilities.get_vulnerabilities_provider",
lambda: provider,
)
return _set_provider


@pytest.fixture
def load_vulnerabilities_report_file(request):
module_path = os.path.dirname(request.module.__file__)
test_name = os.path.splitext(os.path.basename(request.module.__file__))[0]
def _load_vulnerabilities_report_file(file_name):
"""
Load a json file containing the vulnerabilities report into an instance of ImageVulnerabilitiesReport.
The files should all be stored in the tests/unit/anchore_engine/services/policy_engine/policy/gates/test_vulnerabilities folder.
"""
with open(os.path.join(module_path, test_name, file_name)) as file:
json_data = json.load(file)
return ImageVulnerabilitiesReport.from_json(json_data)
return _load_vulnerabilities_report_file


@pytest.fixture
def setup_mocks_vulnerabilities_gate(
load_vulnerabilities_report_file, monkeypatch, set_provider
):
set_provider()
# required for VulnerabilitiesGate.prepare_context
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.policy.gates.vulnerabilities.get_thread_scoped_session",
lambda: None,
)
# required for VulnerabilitiesGate.prepare_context
# mocks anchore_engine.services.policy_engine.engine.vulns.providers.LegacyProvider.get_image_vulnerabilities
def _setup_mocks_vulnerabilities_gate(file_name):
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.vulns.providers.LegacyProvider.get_image_vulnerabilities",
lambda image, db_session: load_vulnerabilities_report_file(file_name),
)
return _setup_mocks_vulnerabilities_gate


@pytest.fixture
def mock_distromapping_query(monkeypatch):
# mocks DB query in anchore_engine.db.entities.policy_engine.DistroMapping.distros_for
mock_db = Mock()
mock_db.query().get = lambda x: MAPPINGS_MAP.get(x, None)
monkeypatch.setattr(
"anchore_engine.db.entities.policy_engine.get_thread_scoped_session",
lambda: mock_db,
)


@pytest.fixture
def setup_mocks_feed_out_of_date_trigger(monkeypatch, mock_distromapping_query):
# required for FeedOutOfDateTrigger.evaluate
# setup for anchore_engine.services.policy_engine.engine.feeds.feeds.FeedRegistry.registered_vulnerability_feed_names
init_feed_registry()
def _setup_mocks(feed_group_metadata):
# required for FeedOutOfDateTrigger.evaluate
# mocks anchore_engine.services.policy_engine.engine.feeds.db.get_feed_group_detached
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.policy.gates.vulnerabilities.get_feed_group_detached",
lambda x, y: feed_group_metadata,
)
return _setup_mocks


@pytest.fixture
def setup_mocks_unsupported_distro_trigger(monkeypatch, mock_distromapping_query):
# required for UnsupportedDistroTrigger.evaluate
# setup for anchore_engine.services.policy_engine.engine.feeds.feeds.FeedRegistry.registered_vulnerability_feed_names
init_feed_registry()
# required for UnsupportedDistroTrigger.evaluate
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.feeds.feeds.get_session",
lambda: None,
)
def _setup_mocks(feed_metadata):
# required for UnsupportedDistroTrigger.evaluate
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.feeds.feeds.get_feed_json",
lambda db_session, feed_name: feed_metadata.to_json(),
)
return _setup_mocks
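
# Running only this module (illustrative invocation; assumes pytest is run
# from the repository root):
#
#     pytest tests/unit/anchore_engine/services/policy_engine/engine/policy/gates/test_vulnerabilities.py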


class TestVulnerabilitiesGate:
@pytest.mark.parametrize(
"image_obj, mock_vuln_report, feed_group_metadata, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
FeedGroupMetadata(
last_sync=datetime.datetime.utcnow() - datetime.timedelta(days=2),
name="test-feed-out-of-date",
),
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
FeedGroupMetadata(
last_sync=datetime.datetime.utcnow(),
name="test-feed-not-out-of-date",
),
False,
),
],
)
def test_feed_out_of_date_trigger(
self,
image_obj,
mock_vuln_report,
feed_group_metadata,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
setup_mocks_feed_out_of_date_trigger,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
setup_mocks_feed_out_of_date_trigger(feed_group_metadata)
vulns_gate = VulnerabilitiesGate()
trigger = FeedOutOfDateTrigger(
parent_gate_cls=VulnerabilitiesGate, max_days_since_sync="1"
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert (
trigger.fired[0].msg
== f"The vulnerability feed for this image distro is older than MAXAGE ({trigger.max_age.value()}) days"
)

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, feed_metadata, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
FeedMetadata(
name="vulnerabilities", groups=[FeedGroupMetadata(name="debian:10")]
),
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
FeedMetadata(
name="vulnerabilities", groups=[FeedGroupMetadata(name="debian:9")]
),
True,
),
],
)
def test_unsupported_distro_trigger(
self,
image_obj,
mock_vuln_report,
feed_metadata,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
setup_mocks_unsupported_distro_trigger,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
setup_mocks_unsupported_distro_trigger(feed_metadata)
vulns_gate = VulnerabilitiesGate()
trigger = UnsupportedDistroTrigger(parent_gate_cls=VulnerabilitiesGate)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert (
trigger.fired[0].msg
== f"Distro-specific feed data not found for distro namespace: {image_obj.distro_namespace}. Cannot perform CVE scan OS/distro packages"
)

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, vulnerability_ids, vendor_only, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"CVE-2020-13529", # One matching vuln
"false",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"CVE-2020-13579", # One fake vuln
"false",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"CVE-2020-13529", # One matching vulns (not a won't fix)
"true", # Vendor only
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_wont-fix.json",
"CVE-2020-15719", # One matching vulns (was changed to won't fix for the purposes of this test)
"true", # Vendor only
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"CVE-2020-13529, CVE-2020-13579", # One matching vuln and one fake vuln
"false",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"CVE-2020-13525, CVE-2004-0975", # Two fake vulns
"false",
False,
),
],
)
def test_vulnerabilities_blacklist_trigger(
self,
image_obj,
mock_vuln_report,
vulnerability_ids,
vendor_only,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityBlacklistTrigger(
parent_gate_cls=VulnerabilitiesGate,
vulnerability_ids=vulnerability_ids,
vendor_only=vendor_only,
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert re.fullmatch(
r"Blacklisted vulnerabilities detected: \[((\'CVE-\d{4}-\d{4,}\')(, )?)+\]",
trigger.fired[0].msg,
)

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, fix_available, expected_trigger_fired, expected_number_triggers",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_fix-available.json",
"true",
True,
1,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_fix-available.json",
"false",
False,
0,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"true",
False,
0,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"false",
True,
1,
),
],
)
def test_vulnerability_match_trigger_fix_available(
self,
image_obj,
mock_vuln_report,
fix_available,
expected_trigger_fired,
expected_number_triggers,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
fix_available=fix_available,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
assert len(trigger.fired) == expected_number_triggers

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, vendor_only, expected_trigger_fired, expected_number_triggers",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"true",
True,
1,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"false",
True,
1,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_wont-fix.json",
"true",
False,
0,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_wont-fix.json",
"false",
True,
1,
),
],
)
def test_vulnerability_match_trigger_vendor_only(
self,
image_obj,
mock_vuln_report,
vendor_only,
expected_trigger_fired,
expected_number_triggers,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
vendor_only=vendor_only,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
assert len(trigger.fired) == expected_number_triggers

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, max_days_since_creation, expected_trigger_fired, expected_number_triggers",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"1000000",
False,
0,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"0",
True,
1,
),
],
)
def test_vulnerability_match_trigger_max_days_since_creation(
self,
image_obj,
mock_vuln_report,
max_days_since_creation,
expected_trigger_fired,
expected_number_triggers,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
max_days_since_creation=max_days_since_creation,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
assert len(trigger.fired) == expected_number_triggers

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, max_days_since_fix, expected_trigger_fired, expected_number_triggers",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_fix-available.json",
"1000000",
False,
0,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_fix-available.json",
"0",
True,
1,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"1000000",
False,
0,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"0",
False,
0,
),
],
)
def test_vulnerability_match_trigger_max_days_since_fix(
self,
image_obj,
mock_vuln_report,
max_days_since_fix,
expected_trigger_fired,
expected_number_triggers,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
max_days_since_creation=max_days_since_fix,
fix_available="true",
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
assert len(trigger.fired) == expected_number_triggers

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, package_path_exclude, expected_trigger_fired, expected_number_triggers",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_non-os_will-fix.json",
"/usr/.*",
True,
1,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_non-os_will-fix.json",
"/bin/.*",
False,
0,
),
],
)
def test_vulnerability_match_trigger_package_path_exclude(
self,
image_obj,
mock_vuln_report,
package_path_exclude,
expected_trigger_fired,
expected_number_triggers,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
package_path_exclude=package_path_exclude,
package_type="non-os",
vendor_only=False,
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
assert len(trigger.fired) == expected_number_triggers

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, package_type, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"all",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"os",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_non-os_will-fix.json",
"os",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"non-os",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_non-os_will-fix.json",
"non-os",
True,
),
],
)
def test_vulnerability_match_trigger_package_type(
self,
image_obj,
mock_vuln_report,
package_type,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate, package_type=package_type
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, severity_comparison, severity, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"=",
"unknown",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"=",
"negligible",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"=",
"low",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"=",
"medium",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"=",
"high",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"=",
"critical",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"<",
"medium",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
">",
"medium",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"!=",
"medium",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"<=",
"medium",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
">=",
"medium",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"<",
"unknown",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
">",
"critical",
False,
),
],
)
def test_vulnerability_match_trigger_severity_comparison(
self,
image_obj,
mock_vuln_report,
severity_comparison,
severity,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
severity_comparison=severity_comparison,
severity=severity,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert len(trigger.fired) == 1
else:
assert len(trigger.fired) == 0

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, score_comparison, base_score, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
">",
"6.0",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"<",
"6.0",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"=",
"6.0",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
">=",
"6.0",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"<=",
"6.0",
False,
),
],
)
def test_vulnerability_match_trigger_cvssv3_base_score_comparison(
self,
image_obj,
mock_vuln_report,
score_comparison,
base_score,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
cvss_v3_base_score_comparison=score_comparison,
cvss_v3_base_score=base_score,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert len(trigger.fired) == 1
else:
assert len(trigger.fired) == 0

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, score_comparison, exploitability_score, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
">",
"3.8",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"<",
"3.8",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"=",
"3.8",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
">=",
"3.8",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"<=",
"3.8",
True,
),
],
)
def test_vulnerability_match_trigger_cvssv3_exploitability_score_comparison(
self,
image_obj,
mock_vuln_report,
score_comparison,
exploitability_score,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
cvss_v3_exploitability_score_comparison=score_comparison,
cvss_v3_exploitability_score=exploitability_score,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert len(trigger.fired) == 1
else:
assert len(trigger.fired) == 0

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, score_comparison, impact_score, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
">",
"3.6",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"<",
"3.6",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"=",
"3.6",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
">=",
"3.6",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix.json",
"<=",
"3.6",
False,
),
],
)
def test_vulnerability_match_trigger_cvssv3_impact_score_comparison(
self,
image_obj,
mock_vuln_report,
score_comparison,
impact_score,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
cvss_v3_impact_score_comparison=score_comparison,
cvss_v3_impact_score=impact_score,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert len(trigger.fired) == 1
else:
assert len(trigger.fired) == 0

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, score_comparison, base_score, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
">",
"6.0",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
"<",
"6.0",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
"=",
"6.0",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
">=",
"6.0",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
"<=",
"6.0",
False,
),
],
)
def test_vulnerability_match_trigger_vendor_cvssv3_base_score_comparison(
self,
image_obj,
mock_vuln_report,
score_comparison,
base_score,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
vendor_cvss_v3_base_score_comparison=score_comparison,
vendor_cvss_v3_base_score=base_score,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert len(trigger.fired) == 1
else:
assert len(trigger.fired) == 0

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, score_comparison, exploitability_score, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
">",
"3.8",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
"<",
"3.8",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
"=",
"3.8",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
">=",
"3.8",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
"<=",
"3.8",
True,
),
],
)
def test_vulnerability_match_trigger_vendor_cvssv3_exploitability_score_comparison(
self,
image_obj,
mock_vuln_report,
score_comparison,
exploitability_score,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
vendor_cvss_v3_exploitability_score_comparison=score_comparison,
vendor_cvss_v3_exploitability_score=exploitability_score,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert len(trigger.fired) == 1
else:
assert len(trigger.fired) == 0

    @pytest.mark.parametrize(
"image_obj, mock_vuln_report, score_comparison, impact_score, expected_trigger_fired",
[
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
">",
"3.6",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
"<",
"3.6",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
"=",
"3.6",
False,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
">=",
"3.6",
True,
),
(
Image(
id="1", user_id="admin", distro_name="debian", distro_version="10"
),
"debian_1_os_will-fix_vendor_cvssv3.json",
"<=",
"3.6",
False,
),
],
)
def test_vulnerability_match_trigger_vendor_cvssv3_impact_score_comparison(
self,
image_obj,
mock_vuln_report,
score_comparison,
impact_score,
expected_trigger_fired,
setup_mocks_vulnerabilities_gate,
):
setup_mocks_vulnerabilities_gate(mock_vuln_report)
vulns_gate = VulnerabilitiesGate()
trigger = VulnerabilityMatchTrigger(
parent_gate_cls=VulnerabilitiesGate,
vendor_cvss_v3_impact_score_comparison=score_comparison,
vendor_cvss_v3_impact_score=impact_score,
package_type="all",
)
exec_context = ExecutionContext(db_session=None, configuration={})
vulns_gate.prepare_context(image_obj, exec_context)
trigger.evaluate(image_obj, exec_context)
assert trigger.did_fire == expected_trigger_fired
if expected_trigger_fired:
assert len(trigger.fired) == 1
else:
assert len(trigger.fired) == 0
| apache-2.0 | -2,858,890,194,203,769,300 | 33.500391 | 152 | 0.491366 | false |
curious-abhinav/change-point | docs/conf.py | 1 | 7843 | # -*- coding: utf-8 -*-
#
# Change Point documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Change Point'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'change-pointdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'change-point.tex',
u'Change Point Documentation',
u"Abhinav Singh", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'change-point', u'Change Point Documentation',
[u"Abhinav Singh"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'change-point', u'Change Point Documentation',
u"Abhinav Singh", 'Change Point',
'Finding change points in high dimensional responses of neural circuits', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| mit | -955,102,581,346,214,800 | 31.143443 | 96 | 0.697565 | false |
tbunnyman/FAiler | FAiler/faile.py | 1 | 2872 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
from datetime import datetime
from FAiler.exceptions import FAError
class FAile():
"""
Represents a file downloaded from FurAffinity.
    The public attributes of this class are safe for read access by design.
    FAile.directory: the directory of the supplied file, or '' (the current directory) if no path was given
FAile.filename: the full name of the supplied file. This never changes
FAile.date: When this was uploaded
FAile.artist: the name of the user who uploaded this
FAile.name: the name of the submitted file
FAile.fileType: The extension of the submitted file
Some example files
1201126929.[koh]_fooooom_toaster.jpg
1362739849.wolfy-nail_2013-03-01-djzing.jpg
"""
directory = None
filename = None
date = None
artist = None
name = None
fileType = None
def __init__(self, faFile):
"""
This accepts both a standard name or with path to file.
:param faFile: Name of or path to a file from FA
:raise: FAError if the file name cannot be parsed.
"""
self.directory = os.path.dirname(faFile)
self.filename = os.path.basename(faFile)
self._parse_name(self.filename) # Raises on fail
def __repr__(self):
return 'FAile({})'.format(os.path.join(self.directory, self.filename))
def __str__(self):
"""
:return: the filename as a string
"""
return str(self.filename)
def _parse_name(self, name):
"""
        Shared parsing helper (kept in one place so __init__ and
        clean_reupload don't repeat themselves): assigns date, artist,
        name and fileType from the filename.
        :raises: FAError if the name does not parse
"""
faRe = re.compile(r'(\d+)\.([\w\[\]~.-]+?)_(\S+)\.(\w{2,4})')
parsed = re.match(faRe, name)
if parsed is None:
raise FAError("Unable to parse file name: " + name)
self.date, self.artist, self.name, self.fileType = parsed.groups()
self.date = datetime.fromtimestamp(int(self.date))
def clean_reupload(self):
"""
Often enough someone downloads a file from FA and then re-uploads it
This checks for that and changes the Number, User, & Name to that of
the "original" uploader.
The basename is kept unchanged
ex;
>>> from FAiler import FAile
>>> f2 = FAile('1362168441.shim_1362116845.furball_shim_bday2013.jpg')
>>> "{} - {}.{}".format(f2.artist, f2.name, f2.fileType)
'shim - 1362116845.furball_shim_bday2013.jpg'
>>> f2.clean_reupload()
>>> "{0.artist} - {0.name}.{0.fileType}".format(f2)
'furball - shim_bday2013.jpg'
"""
try:
self._parse_name("{0.name}.{0.fileType}".format(self))
except FAError:
pass # We don't care if parse fails this time around
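# Illustrative self-test (a minimal sketch: the sample filename comes from the
# class docstring above; running this standalone assumes the FAiler package is
# importable):
if __name__ == '__main__':
    sample = FAile('1201126929.[koh]_fooooom_toaster.jpg')
    print('artist:', sample.artist)    # -> [koh]
    print('name:', sample.name)        # -> fooooom_toaster
    print('type:', sample.fileType)    # -> jpg
    print('uploaded:', sample.date)    # datetime parsed from the timestamp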
| bsd-3-clause | 931,305,739,904,286,800 | 32.788235 | 78 | 0.612465 | false |
academichero/jobs | modules/board/migrations/0003_auto_20160622_2034.py | 1 | 1227 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-22 23:34
from __future__ import unicode_literals
import datetime
import django.core.validators
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('board', '0002_job_featured'),
]
operations = [
migrations.AddField(
model_name='job',
name='internal_job',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='job',
name='open_job',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='job',
name='validity',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 22, 23, 34, 36, 512801, tzinfo=utc)),
preserve_default=False,
),
migrations.AddField(
model_name='job',
name='workload',
field=models.PositiveIntegerField(default=20, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(45)]),
preserve_default=False,
),
]
| mit | 998,880,232,990,725,600 | 29.675 | 162 | 0.600652 | false |
pavel-paulau/perfrunner | perfrunner/tests/kv.py | 1 | 12499 | from logger import logger
from perfrunner.helpers.cbmonitor import timeit, with_stats
from perfrunner.helpers.worker import (
pillowfight_data_load_task,
pillowfight_task,
)
from perfrunner.tests import PerfTest
from perfrunner.workloads.pathoGen import PathoGen
from perfrunner.workloads.tcmalloc import WorkloadGen
class KVTest(PerfTest):
@with_stats
def access(self, *args):
super().access(*args)
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.reset_kv_stats()
self.access()
self.report_kpi()
class ReadLatencyTest(KVTest):
"""Enable reporting of GET latency."""
COLLECTORS = {'latency': True}
def _report_kpi(self):
self.reporter.post(
*self.metrics.kv_latency(operation='get')
)
class MixedLatencyTest(ReadLatencyTest):
"""Enable reporting of GET and SET latency."""
def _report_kpi(self):
for operation in ('get', 'set'):
self.reporter.post(
*self.metrics.kv_latency(operation=operation)
)
class DGMTest(KVTest):
COLLECTORS = {'disk': True, 'net': False}
class DGMCompactionTest(DGMTest):
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.reset_kv_stats()
self.compact_bucket(wait=False)
self.access()
self.report_kpi()
class DGMCompactedTest(DGMTest):
def run(self):
self.load()
self.wait_for_persistence()
self.compact_bucket()
self.hot_load()
self.reset_kv_stats()
self.access()
self.report_kpi()
class ReadLatencyDGMTest(KVTest):
COLLECTORS = {'disk': True, 'latency': True, 'net': False}
def _report_kpi(self):
self.reporter.post(
*self.metrics.kv_latency(operation='get')
)
class MixedLatencyDGMTest(ReadLatencyDGMTest):
def _report_kpi(self):
for operation in ('get', 'set'):
self.reporter.post(
*self.metrics.kv_latency(operation=operation)
)
class ReadLatencyDGMCompactionTest(DGMCompactionTest):
COLLECTORS = {'disk': True, 'latency': True, 'net': False}
def _report_kpi(self):
self.reporter.post(
*self.metrics.kv_latency(operation='get')
)
class ReadLatencyDGMCompactedTest(DGMCompactedTest):
COLLECTORS = {'disk': True, 'latency': True, 'net': False}
def _report_kpi(self):
for percentile in 99.9, 99.99:
self.reporter.post(
*self.metrics.kv_latency(operation='get', percentile=percentile)
)
class DurabilityTest(KVTest):
"""Enable reporting of persistTo=1 and replicateTo=1 latency."""
COLLECTORS = {'durability': True}
def _report_kpi(self):
for operation in ('replicate_to', 'persist_to'):
self.reporter.post(
*self.metrics.kv_latency(operation=operation,
collector='durability')
)
class SubDocTest(MixedLatencyTest):
"""Enable reporting of SubDoc latency."""
COLLECTORS = {'latency': True}
class XATTRTest(MixedLatencyTest):
"""Enable reporting of XATTR latency."""
COLLECTORS = {'latency': True}
def run(self):
self.load()
self.xattr_load()
self.wait_for_persistence()
self.access()
self.report_kpi()
class DrainTest(DGMCompactionTest):
"""Enable reporting of average disk write queue size."""
def _report_kpi(self):
self.reporter.post(
*self.metrics.avg_disk_write_queue()
)
class InitialLoadTest(DrainTest):
@with_stats
def load(self, *args, **kwargs):
super().load(*args, **kwargs)
def run(self):
self.load()
self.report_kpi()
class IngestionTest(KVTest):
COLLECTORS = {'disk': True, 'net': False}
@with_stats
def access(self, *args, **kwargs):
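        # Call PerfTest.access directly, bypassing KVTest.access, so the run
        # is not wrapped in @with_stats twice.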
super(KVTest, self).access(*args, **kwargs)
self.wait_for_persistence()
def _report_kpi(self):
self.reporter.post(
*self.metrics.avg_total_queue_age()
)
class WarmupTest(PerfTest):
"""Measure the time it takes to perform cluster warm up."""
COLLECTORS = {'net': False}
@with_stats
def warmup(self):
self.remote.stop_server()
self.remote.drop_caches()
return self._warmup()
@timeit
def _warmup(self):
self.remote.start_server()
for master in self.cluster_spec.masters:
for bucket in self.test_config.buckets:
self.monitor.monitor_warmup(self.memcached, master, bucket)
def _report_kpi(self, time_elapsed):
self.reporter.post(
*self.metrics.elapsed_time(time_elapsed)
)
def run(self):
self.load()
self.wait_for_persistence()
self.access()
self.wait_for_persistence()
time_elapsed = self.warmup()
self.report_kpi(time_elapsed)
class FragmentationTest(PerfTest):
"""Implement the append-only workload.
Scenario:
1. Single node.
2. Load X items, 700-1400 bytes, average 1KB (11-22 fields).
3. Append data
3.1. Mark first 80% of items as working set.
3.2. Randomly update 75% of items in working set by adding 1 field at a time (62 bytes).
3.3. Mark first 40% of items as working set.
3.4. Randomly update 75% of items in working set by adding 1 field at a time (62 bytes).
3.5. Mark first 20% of items as working set.
3.6. Randomly update 75% of items in working set by adding 1 field at a time (62 bytes).
4. Repeat step #3 5 times.
See workloads/tcmalloc.py for details.
    The scenario described above makes it possible to spot memory/allocator
    fragmentation issues.
"""
COLLECTORS = {'net': False}
@with_stats
def load_and_append(self):
password = self.test_config.bucket.password
WorkloadGen(self.test_config.load_settings.items,
self.master_node, self.test_config.buckets[0],
password).run()
def calc_fragmentation_ratio(self) -> float:
ratios = []
for target in self.target_iterator:
port = self.rest.get_memcached_port(target.node)
stats = self.memcached.get_stats(target.node, port, target.bucket,
stats='memory')
ratio = int(stats[b'mem_used']) / int(stats[b'total_heap_bytes'])
ratios.append(ratio)
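        # Fragmentation (%) = 100 * (1 - used / held), averaged across nodes:
        # the share of heap bytes the allocator holds but the bucket is not using.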
ratio = 100 * (1 - sum(ratios) / len(ratios))
ratio = round(ratio, 1)
logger.info('Fragmentation: {}'.format(ratio))
return ratio
def _report_kpi(self):
ratio = self.calc_fragmentation_ratio()
self.reporter.post(
*self.metrics.fragmentation_ratio(ratio)
)
def run(self):
self.load_and_append()
self.report_kpi()
class FragmentationLargeTest(FragmentationTest):
@with_stats
def load_and_append(self):
password = self.test_config.bucket.password
WorkloadGen(self.test_config.load_settings.items,
self.master_node, self.test_config.buckets[0], password,
small=False).run()
class PathoGenTest(FragmentationTest):
@with_stats
def access(self, *args):
for target in self.target_iterator:
pg = PathoGen(num_items=self.test_config.load_settings.items,
num_workers=self.test_config.load_settings.workers,
num_iterations=self.test_config.load_settings.iterations,
frozen_mode=False,
host=target.node, port=8091,
bucket=target.bucket, password=target.password)
pg.run()
def _report_kpi(self):
self.reporter.post(
*self.metrics.avg_memcached_rss()
)
self.reporter.post(
*self.metrics.max_memcached_rss()
)
def run(self):
self.access()
self.report_kpi()
class PathoGenFrozenTest(PathoGenTest):
@with_stats
def access(self):
for target in self.target_iterator:
pg = PathoGen(num_items=self.test_config.load_settings.items,
num_workers=self.test_config.load_settings.workers,
num_iterations=self.test_config.load_settings.iterations,
frozen_mode=True,
host=target.node, port=8091,
bucket=target.bucket, password=target.password)
pg.run()
class ThroughputTest(KVTest):
def _measure_curr_ops(self) -> int:
ops = 0
for bucket in self.test_config.buckets:
for server in self.cluster_spec.servers:
port = self.rest.get_memcached_port(server)
stats = self.memcached.get_stats(server, port, bucket)
for stat in b'cmd_get', b'cmd_set':
ops += int(stats[stat])
return ops
def _report_kpi(self):
total_ops = self._measure_curr_ops()
self.reporter.post(
*self.metrics.kv_throughput(total_ops)
)
class EvictionTest(KVTest):
COLLECTORS = {'net': False}
def reset_kv_stats(self):
pass
def _measure_ejected_items(self) -> int:
ejected_items = 0
for bucket in self.test_config.buckets:
for hostname, _ in self.rest.get_node_stats(self.master_node,
bucket):
host = hostname.split(':')[0]
port = self.rest.get_memcached_port(host)
stats = self.memcached.get_stats(host, port, bucket)
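                # Sum the auto-delete counters across active, pending and
                # replica vBuckets: items ejected under memory pressure.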
ejected_items += int(stats[b'vb_active_auto_delete_count'])
ejected_items += int(stats[b'vb_pending_auto_delete_count'])
ejected_items += int(stats[b'vb_replica_auto_delete_count'])
return ejected_items
def _report_kpi(self):
ejected_items = self._measure_ejected_items()
self.reporter.post(
*self.metrics.kv_throughput(ejected_items)
)
class PillowFightTest(PerfTest):
"""Use cbc-pillowfight from libcouchbase to drive cluster."""
ALL_BUCKETS = True
def load(self, *args):
PerfTest.load(self, task=pillowfight_data_load_task)
@with_stats
def access(self, *args):
self.download_certificate()
PerfTest.access(self, task=pillowfight_task)
def _report_kpi(self, *args):
self.reporter.post(
*self.metrics.max_ops()
)
def run(self):
self.load()
self.wait_for_persistence()
self.access()
self.report_kpi()
class CompressionTest(PillowFightTest):
COLLECTORS = {'iostat': False, 'net': False}
@with_stats
@timeit
def wait_for_compression(self):
for master in self.cluster_spec.masters:
for bucket in self.test_config.buckets:
self.monitor.monitor_compression(self.memcached, master, bucket)
def _report_kpi(self, time_elapsed: float):
self.reporter.post(
*self.metrics.compression_throughput(time_elapsed)
)
def run(self):
self.load()
time_elapsed = self.wait_for_compression()
self.report_kpi(time_elapsed)
class CompactionTest(KVTest):
COLLECTORS = {'net': False}
@with_stats
@timeit
def compact(self):
self.compact_bucket()
def _report_kpi(self, time_elapsed):
self.reporter.post(
*self.metrics.elapsed_time(time_elapsed)
)
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.access_bg()
time_elapsed = self.compact()
self.report_kpi(time_elapsed)
class MemoryOverheadTest(PillowFightTest):
COLLECTORS = {'iostat': False, 'net': False}
PF_KEY_SIZE = 20
def _report_kpi(self):
self.reporter.post(
*self.metrics.memory_overhead(key_size=self.PF_KEY_SIZE)
)
@with_stats
def access(self, *args):
self.sleep()
class CpuUtilizationTest(KVTest):
def _report_kpi(self, *args, **kwargs):
self.reporter.post(
*self.metrics.cpu_utilization()
)
| apache-2.0 | -1,810,513,932,737,493,200 | 23.799603 | 96 | 0.587807 | false |
guillempalou/scikit-cv | skcv/video/segmentation/region_tracking.py | 1 | 3595 | import networkx as nx
import numpy as np
def bipartite_region_tracking(partition, optical_flow, reliability,
matching_th=0.1, reliability_th=0.2):
"""
Parameters
----------
partition: numpy array
A 3D label array where each label represents a region
optical_flow: numpy array
A 3D,2 array representing optical flow values for each frame
reliability: numpy array
A 3D array representing the flow reliability
matching_th: float, optional
matching threshold for the bipartite matching
reliability_th: float, optional
reliability threshold to stop tracking
Returns
-------
    A relabeled 3D array with the same shape as ``partition``, in which
    regions matched across frames share the same label
"""
dimensions = len(partition.shape)
if dimensions != 3: # pragma: no cover
raise ValueError("Dimensions must be 3")
    # link regions across frames by solving a weighted bipartite matching
    # between the labels of each pair of consecutive frames
frames = partition.shape[0]
width = partition.shape[1]
height = partition.shape[2]
new_partition = np.zeros_like(partition)
#the first frame is the same
new_partition[0,...] = partition[0,...]
current_label = np.max(np.unique(partition[0,...]))+1
for frame in range(frames-1):
labels = np.unique(new_partition[frame, ...])
labels_next = np.unique(partition[frame+1, ...])
        # create a bipartite graph between the labels of consecutive frames
bipartite = nx.Graph()
bipartite.add_nodes_from([l for l in labels])
bipartite.add_nodes_from([l for l in labels_next])
# find the correspondence of each label to the next frame
for label in labels:
px, py = np.where(new_partition[frame, ...] == label)
# find the mean reliability
rel = np.mean(reliability[frame, px, py])
if rel < reliability_th: # pragma: no cover
continue
# find where the regions projects to the next frame
npx = px + optical_flow[frame, px, py, 0]
npy = py + optical_flow[frame, px, py, 1]
#check for bounds
in_x = np.logical_and(0 <= npx, npx < width)
in_y = np.logical_and(0 <= npy, npy < height)
idx = np.logical_and(in_x, in_y)
npx = npx[idx]
npy = npy[idx]
count = np.bincount(partition[frame+1,
npx.astype(np.int),
npy.astype(np.int)].astype(np.int))
# get the count and eliminate weak correspondences
max_count = max(count)
nodes = np.nonzero(count > max_count*matching_th)[0]
weight = count[nodes]/max_count
for i, n in enumerate(nodes):
bipartite.add_edge(label, n, weight=weight[i])
# max weighted matching
matchings = nx.max_weight_matching(bipartite)
# assign propagated labels to the matchings
for a in matchings:
b = matchings[a]
#print("Match {0}-{1}".format(a,b))
if b not in labels_next:
continue
px, py = np.where(partition[frame+1, ...] == b)
new_partition[frame+1, px, py] = a
# assign new labels to non-matched regions
for n in bipartite.nodes():
if n not in labels_next:
continue
if n not in matchings:
px, py = np.where(partition[frame+1, ...] == n)
new_partition[frame+1, px, py] = current_label + 1
current_label += 1
return new_partition | bsd-3-clause | 868,079,825,263,690,500 | 31.107143 | 77 | 0.569124 | false |
iradicek/clara | clara/modeltograph.py | 1 | 1609 | '''
Converts Program model to a image (graph)
'''
import pygraphviz as pgv
def stmts_to_str(title, types, ss):
l = [title]
if types:
l.append(', '.join(['%s: %s' % x for x in types]))
for (v, e) in ss:
ls = str(e)
ls = ls.replace(r'\n', r'\\n')
ls = ls.replace(r'\r', r'\\r')
ls = ls.replace(r'\t', r'\\t')
l.append('%s := %s' % (v, ls))
ml = max([len(x) for x in l])
l.insert(2 if types else 1, '-' * ml)
return '\n'.join(l)
def create_graph(pm):
G = pgv.AGraph(directed=True)
for name, fnc in list(pm.fncs.items()):
fnclab = 'fun %s (%s) : %s --- ' % (
fnc.name,
', '.join(['%s : %s' % x for x in fnc.params]),
fnc.rettype)
types = list(fnc.types.items())
for loc in fnc.locs():
fnclabel = fnclab if loc == fnc.initloc else ''
label = stmts_to_str('%sL%s' % (fnclabel, loc,), types,
fnc.exprs(loc))
types = None
G.add_node('%s-%s' % (name, loc), label=label, shape='rectangle',
fontname='monospace')
for loc in fnc.locs():
locs = '%s-%s' % (name, loc)
loc2 = fnc.trans(loc, True)
locs2 = '%s-%s' % (name, loc2)
if loc2:
G.add_edge(locs, locs2, label='True')
loc2 = fnc.trans(loc, False)
locs2 = '%s-%s' % (name, loc2)
if loc2:
G.add_edge(locs, locs2, label='False')
G.layout('dot')
return G
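# Example usage (a sketch; ``pm`` stands for a Program model produced by the
# rest of clara and is assumed here rather than constructed):
#
#     G = create_graph(pm)
#     G.draw('program.png')  # pygraphviz renders the dot-laid-out graph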
| gpl-3.0 | -2,082,160,572,237,811,200 | 25.816667 | 77 | 0.444997 | false |
willmcgugan/rich | rich/palette.py | 1 | 3288 | from math import sqrt
from functools import lru_cache
from typing import Sequence, Tuple, TYPE_CHECKING
from .color_triplet import ColorTriplet
if TYPE_CHECKING:
from rich.table import Table
class Palette:
"""A palette of available colors."""
def __init__(self, colors: Sequence[Tuple[int, int, int]]):
self._colors = colors
def __getitem__(self, number: int) -> ColorTriplet:
return ColorTriplet(*self._colors[number])
def __rich__(self) -> "Table":
from rich.color import Color
from rich.style import Style
from rich.text import Text
from rich.table import Table
table = Table(
"index",
"RGB",
"Color",
title="Palette",
caption=f"{len(self._colors)} colors",
highlight=True,
caption_justify="right",
)
for index, color in enumerate(self._colors):
table.add_row(
str(index),
repr(color),
Text(" " * 16, style=Style(bgcolor=Color.from_rgb(*color))),
)
return table
    # Nearest-match lookup is O(len(palette)) per call, so results are memoized
@lru_cache(maxsize=1024)
def match(self, color: Tuple[int, int, int]) -> int:
"""Find a color from a palette that most closely matches a given color.
Args:
            color (Tuple[int, int, int]): RGB components in the range 0 to 255.
        Returns:
            int: Index of the closest matching color.
"""
red1, green1, blue1 = color
_sqrt = sqrt
get_color = self._colors.__getitem__
def get_color_distance(index: int) -> float:
"""Get the distance to a color."""
red2, green2, blue2 = get_color(index)
red_mean = (red1 + red2) // 2
red = red1 - red2
green = green1 - green2
blue = blue1 - blue2
return _sqrt(
(((512 + red_mean) * red * red) >> 8)
+ 4 * green * green
+ (((767 - red_mean) * blue * blue) >> 8)
)
min_index = min(range(len(self._colors)), key=get_color_distance)
return min_index
if __name__ == "__main__": # pragma: no cover
import colorsys
from typing import Iterable
from rich.color import Color
from rich.console import Console, ConsoleOptions
from rich.segment import Segment
from rich.style import Style
class ColorBox:
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> Iterable[Segment]:
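            # Each "▄" cell packs two pixels: the lower half takes the
            # foreground color and the upper half shows the background,
            # doubling the effective vertical resolution.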
height = console.size.height - 3
for y in range(0, height):
for x in range(options.max_width):
h = x / options.max_width
l = y / (height + 1)
r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
r2, g2, b2 = colorsys.hls_to_rgb(h, l + (1 / height / 2), 1.0)
bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
yield Segment("▄", Style(color=color, bgcolor=bgcolor))
yield Segment.line()
console = Console()
console.print(ColorBox())
| mit | 1,800,412,422,958,308,600 | 31.86 | 82 | 0.532562 | false |
liushuyu/DraftBin | pyscripts/acbs/lib/magic.py | 1 | 8071 | # coding: utf-8
'''
Python bindings for libmagic
'''
import ctypes
from collections import namedtuple
from ctypes import *
from ctypes.util import find_library
def _init():
"""
Loads the shared library through ctypes and returns a library
L{ctypes.CDLL} instance
"""
return ctypes.cdll.LoadLibrary(find_library('magic'))
_libraries = {}
_libraries['magic'] = _init()
# Flag constants for open and setflags
MAGIC_NONE = NONE = 0
MAGIC_DEBUG = DEBUG = 1
MAGIC_SYMLINK = SYMLINK = 2
MAGIC_COMPRESS = COMPRESS = 4
MAGIC_DEVICES = DEVICES = 8
MAGIC_MIME_TYPE = MIME_TYPE = 16
MAGIC_CONTINUE = CONTINUE = 32
MAGIC_CHECK = CHECK = 64
MAGIC_PRESERVE_ATIME = PRESERVE_ATIME = 128
MAGIC_RAW = RAW = 256
MAGIC_ERROR = ERROR = 512
MAGIC_MIME_ENCODING = MIME_ENCODING = 1024
MAGIC_MIME = MIME = 1040 # MIME_TYPE + MIME_ENCODING
MAGIC_APPLE = APPLE = 2048
MAGIC_NO_CHECK_COMPRESS = NO_CHECK_COMPRESS = 4096
MAGIC_NO_CHECK_TAR = NO_CHECK_TAR = 8192
MAGIC_NO_CHECK_SOFT = NO_CHECK_SOFT = 16384
MAGIC_NO_CHECK_APPTYPE = NO_CHECK_APPTYPE = 32768
MAGIC_NO_CHECK_ELF = NO_CHECK_ELF = 65536
MAGIC_NO_CHECK_TEXT = NO_CHECK_TEXT = 131072
MAGIC_NO_CHECK_CDF = NO_CHECK_CDF = 262144
MAGIC_NO_CHECK_TOKENS = NO_CHECK_TOKENS = 1048576
MAGIC_NO_CHECK_ENCODING = NO_CHECK_ENCODING = 2097152
MAGIC_NO_CHECK_BUILTIN = NO_CHECK_BUILTIN = 4173824
FileMagic = namedtuple('FileMagic', ('mime_type', 'encoding', 'name'))
class magic_set(Structure):
pass
magic_set._fields_ = []
magic_t = POINTER(magic_set)
_open = _libraries['magic'].magic_open
_open.restype = magic_t
_open.argtypes = [c_int]
_close = _libraries['magic'].magic_close
_close.restype = None
_close.argtypes = [magic_t]
_file = _libraries['magic'].magic_file
_file.restype = c_char_p
_file.argtypes = [magic_t, c_char_p]
_descriptor = _libraries['magic'].magic_descriptor
_descriptor.restype = c_char_p
_descriptor.argtypes = [magic_t, c_int]
_buffer = _libraries['magic'].magic_buffer
_buffer.restype = c_char_p
_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_error = _libraries['magic'].magic_error
_error.restype = c_char_p
_error.argtypes = [magic_t]
_setflags = _libraries['magic'].magic_setflags
_setflags.restype = c_int
_setflags.argtypes = [magic_t, c_int]
_load = _libraries['magic'].magic_load
_load.restype = c_int
_load.argtypes = [magic_t, c_char_p]
_compile = _libraries['magic'].magic_compile
_compile.restype = c_int
_compile.argtypes = [magic_t, c_char_p]
_check = _libraries['magic'].magic_check
_check.restype = c_int
_check.argtypes = [magic_t, c_char_p]
_list = _libraries['magic'].magic_list
_list.restype = c_int
_list.argtypes = [magic_t, c_char_p]
_errno = _libraries['magic'].magic_errno
_errno.restype = c_int
_errno.argtypes = [magic_t]
class Magic(object):
def __init__(self, ms):
self._magic_t = ms
def close(self):
"""
Closes the magic database and deallocates any resources used.
"""
_close(self._magic_t)
def file(self, filename):
"""
Returns a textual description of the contents of the argument passed
as a filename or None if an error occurred and the MAGIC_ERROR flag
is set. A call to errno() will return the numeric error code.
"""
if isinstance(filename, bytes):
bi = filename
else:
try: # keep Python 2 compatibility
bi = bytes(filename, 'utf-8')
except TypeError:
bi = bytes(filename)
r = _file(self._magic_t, bi)
        if isinstance(r, str):
            return r
        elif r is None:
            return None  # lookup failed and the MAGIC_ERROR flag is set
        else:
            # decode so callers (e.g. _create_filemagic) always receive str
            return str(r, 'utf-8')
def descriptor(self, fd):
"""
Like the file method, but the argument is a file descriptor.
"""
return _descriptor(self._magic_t, fd)
def buffer(self, buf):
"""
Returns a textual description of the contents of the argument passed
as a buffer or None if an error occurred and the MAGIC_ERROR flag
is set. A call to errno() will return the numeric error code.
"""
r = _buffer(self._magic_t, buf, len(buf))
        if isinstance(r, str):
            return r
        elif r is None:
            return None  # lookup failed and the MAGIC_ERROR flag is set
        else:
            # str() on bytes would yield "b'...'"; decode instead
            return str(r, 'utf-8')
def error(self):
"""
Returns a textual explanation of the last error or None
if there was no error.
"""
e = _error(self._magic_t)
        if isinstance(e, str):
            return e
        elif e is None:
            return None  # no error recorded
        else:
            return str(e, 'utf-8')
def setflags(self, flags):
"""
Set flags on the magic object which determine how magic checking
behaves; a bitwise OR of the flags described in libmagic(3), but
without the MAGIC_ prefix.
Returns -1 on systems that don't support utime(2) or utimes(2)
when PRESERVE_ATIME is set.
"""
return _setflags(self._magic_t, flags)
def load(self, filename=None):
"""
        Must be called before any magic queries can be performed. Loads
        entries from the colon separated list of database files passed as
        argument, or from the default database file if no argument is given.
Returns 0 on success and -1 on failure.
"""
        if isinstance(filename, str):
            filename = filename.encode('utf-8')  # c_char_p expects bytes on Python 3
        return _load(self._magic_t, filename)
def compile(self, dbs):
"""
Compile entries in the colon separated list of database files
passed as argument or the default database file if no argument.
Returns 0 on success and -1 on failure.
The compiled files created are named from the basename(1) of each file
argument with ".mgc" appended to it.
"""
return _compile(self._magic_t, dbs)
def check(self, dbs):
"""
Check the validity of entries in the colon separated list of
database files passed as argument or the default database file
if no argument.
Returns 0 on success and -1 on failure.
"""
return _check(self._magic_t, dbs)
def list(self, dbs):
"""
        Dump, in a human readable format, all magic entries from the colon
        separated list of database files passed as argument or the default
        database file if no argument.
Returns 0 on success and -1 on failure.
"""
return _list(self._magic_t, dbs)
def errno(self):
"""
Returns a numeric error code. If return value is 0, an internal
magic error occurred. If return value is non-zero, the value is
an OS error code. Use the errno module or os.strerror() can be used
to provide detailed error information.
"""
return _errno(self._magic_t)
def open(flags):
"""
Returns a magic object on success and None on failure.
Flags argument as for setflags.
"""
return Magic(_open(flags))
# Objects used by `detect_from_` functions
mime_magic = Magic(_open(MAGIC_MIME))
mime_magic.load()
none_magic = Magic(_open(MAGIC_NONE))
none_magic.load()
def _create_filemagic(mime_detected, type_detected):
mime_type, mime_encoding = mime_detected.split('; ')
return FileMagic(name=type_detected, mime_type=mime_type,
encoding=mime_encoding.replace('charset=', ''))
def detect_from_filename(filename):
'''Detect mime type, encoding and file type from a filename
Returns a `FileMagic` namedtuple.
'''
return _create_filemagic(mime_magic.file(filename),
none_magic.file(filename))
def detect_from_fobj(fobj):
'''Detect mime type, encoding and file type from file-like object
Returns a `FileMagic` namedtuple.
'''
file_descriptor = fobj.fileno()
return _create_filemagic(mime_magic.descriptor(file_descriptor),
none_magic.descriptor(file_descriptor))
def detect_from_content(byte_content):
'''Detect mime type, encoding and file type from bytes
Returns a `FileMagic` namedtuple.
'''
return _create_filemagic(mime_magic.buffer(byte_content),
none_magic.buffer(byte_content))
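# Minimal usage sketch (assumes libmagic is installed and resolvable through
# find_library; the inspected file defaults to this module itself):
if __name__ == '__main__':
    import sys
    target = sys.argv[1] if len(sys.argv) > 1 else __file__
    fm = detect_from_filename(target)
    print('%s: %s (%s, %s)' % (target, fm.name, fm.mime_type, fm.encoding))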
| lgpl-2.1 | -4,565,356,466,739,291,600 | 28.137184 | 78 | 0.635237 | false |