repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses 15 values)
---|---|---|---|---|---|
erdincay/youtube-dl | youtube_dl/extractor/tnaflix.py | 109 | 10280 |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_duration,
str_to_int,
xpath_text,
)
class TNAFlixNetworkBaseIE(InfoExtractor):
# May be overridden in descendants if necessary
_CONFIG_REGEX = [
r'flashvars\.config\s*=\s*escape\("([^"]+)"',
r'<input[^>]+name="config\d?" value="([^"]+)"',
]
_TITLE_REGEX = r'<input[^>]+name="title" value="([^"]+)"'
_DESCRIPTION_REGEX = r'<input[^>]+name="description" value="([^"]+)"'
_UPLOADER_REGEX = r'<input[^>]+name="username" value="([^"]+)"'
_VIEW_COUNT_REGEX = None
_COMMENT_COUNT_REGEX = None
_AVERAGE_RATING_REGEX = None
_CATEGORIES_REGEX = r'<li[^>]*>\s*<span[^>]+class="infoTitle"[^>]*>Categories:</span>\s*<span[^>]+class="listView"[^>]*>(.+?)</span>\s*</li>'
def _extract_thumbnails(self, flix_xml):
def get_child(elem, names):
for name in names:
child = elem.find(name)
if child is not None:
return child
timeline = get_child(flix_xml, ['timeline', 'rolloverBarImage'])
if timeline is None:
return
pattern_el = get_child(timeline, ['imagePattern', 'pattern'])
if pattern_el is None or not pattern_el.text:
return
first_el = get_child(timeline, ['imageFirst', 'first'])
last_el = get_child(timeline, ['imageLast', 'last'])
if first_el is None or last_el is None:
return
first_text = first_el.text
last_text = last_el.text
if not first_text.isdigit() or not last_text.isdigit():
return
first = int(first_text)
last = int(last_text)
if first > last:
return
width = int_or_none(xpath_text(timeline, './imageWidth', 'thumbnail width'))
height = int_or_none(xpath_text(timeline, './imageHeight', 'thumbnail height'))
return [{
'url': self._proto_relative_url(pattern_el.text.replace('#', compat_str(i)), 'http:'),
'width': width,
'height': height,
} for i in range(first, last + 1)]
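# For example (hypothetical values): a <pattern> of "//img.example.com/thumb#.jpg"
# with first=1 and last=3 expands the '#' placeholder into proto-relative URLs for
# thumb1.jpg, thumb2.jpg and thumb3.jpg.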
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
cfg_url = self._proto_relative_url(self._html_search_regex(
self._CONFIG_REGEX, webpage, 'flashvars.config'), 'http:')
cfg_xml = self._download_xml(
cfg_url, display_id, 'Downloading metadata',
transform_source=fix_xml_ampersands)
formats = []
def extract_video_url(vl):
return re.sub('speed=\d+', 'speed=', vl.text)
video_link = cfg_xml.find('./videoLink')
if video_link is not None:
formats.append({
'url': extract_video_url(video_link),
'ext': xpath_text(cfg_xml, './videoConfig/type', 'type', default='flv'),
})
for item in cfg_xml.findall('./quality/item'):
video_link = item.find('./videoLink')
if video_link is None:
continue
res = item.find('res')
format_id = None if res is None else res.text
height = int_or_none(self._search_regex(
r'^(\d+)[pP]', format_id, 'height', default=None))
formats.append({
'url': self._proto_relative_url(extract_video_url(video_link), 'http:'),
'format_id': format_id,
'height': height,
})
self._sort_formats(formats)
thumbnail = self._proto_relative_url(
xpath_text(cfg_xml, './startThumb', 'thumbnail'), 'http:')
thumbnails = self._extract_thumbnails(cfg_xml)
title = self._html_search_regex(
self._TITLE_REGEX, webpage, 'title') if self._TITLE_REGEX else self._og_search_title(webpage)
age_limit = self._rta_search(webpage)
duration = parse_duration(self._html_search_meta(
'duration', webpage, 'duration', default=None))
def extract_field(pattern, name):
return self._html_search_regex(pattern, webpage, name, default=None) if pattern else None
description = extract_field(self._DESCRIPTION_REGEX, 'description')
uploader = extract_field(self._UPLOADER_REGEX, 'uploader')
view_count = str_to_int(extract_field(self._VIEW_COUNT_REGEX, 'view count'))
comment_count = str_to_int(extract_field(self._COMMENT_COUNT_REGEX, 'comment count'))
average_rating = float_or_none(extract_field(self._AVERAGE_RATING_REGEX, 'average rating'))
categories_str = extract_field(self._CATEGORIES_REGEX, 'categories')
categories = categories_str.split(', ') if categories_str is not None else []
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'thumbnails': thumbnails,
'duration': duration,
'age_limit': age_limit,
'uploader': uploader,
'view_count': view_count,
'comment_count': comment_count,
'average_rating': average_rating,
'categories': categories,
'formats': formats,
}
class TNAFlixIE(TNAFlixNetworkBaseIE):
_VALID_URL = r'https?://(?:www\.)?tnaflix\.com/[^/]+/(?P<display_id>[^/]+)/video(?P<id>\d+)'
_TITLE_REGEX = r'<title>(.+?) - TNAFlix Porn Videos</title>'
_DESCRIPTION_REGEX = r'<h3 itemprop="description">([^<]+)</h3>'
_UPLOADER_REGEX = r'(?s)<span[^>]+class="infoTitle"[^>]*>Uploaded By:</span>(.+?)<div'
_TESTS = [{
# anonymous uploader, no categories
'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
'md5': 'ecf3498417d09216374fc5907f9c6ec0',
'info_dict': {
'id': '553878',
'display_id': 'Carmella-Decesare-striptease',
'ext': 'mp4',
'title': 'Carmella Decesare - striptease',
'thumbnail': 're:https?://.*\.jpg$',
'duration': 91,
'age_limit': 18,
'uploader': 'Anonymous',
'categories': [],
}
}, {
# non-anonymous uploader, categories
'url': 'https://www.tnaflix.com/teen-porn/Educational-xxx-video/video6538',
'md5': '0f5d4d490dbfd117b8607054248a07c0',
'info_dict': {
'id': '6538',
'display_id': 'Educational-xxx-video',
'ext': 'mp4',
'title': 'Educational xxx video',
'description': 'md5:b4fab8f88a8621c8fabd361a173fe5b8',
'thumbnail': 're:https?://.*\.jpg$',
'duration': 164,
'age_limit': 18,
'uploader': 'bobwhite39',
'categories': ['Amateur Porn', 'Squirting Videos', 'Teen Girls 18+'],
}
}, {
'url': 'https://www.tnaflix.com/amateur-porn/bunzHD-Ms.Donk/video358632',
'only_matching': True,
}]
class EMPFlixIE(TNAFlixNetworkBaseIE):
_VALID_URL = r'https?://(?:www\.)?empflix\.com/videos/(?P<display_id>.+?)-(?P<id>[0-9]+)\.html'
_UPLOADER_REGEX = r'<span[^>]+class="infoTitle"[^>]*>Uploaded By:</span>(.+?)</li>'
_TESTS = [{
'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
'md5': 'b1bc15b6412d33902d6e5952035fcabc',
'info_dict': {
'id': '33051',
'display_id': 'Amateur-Finger-Fuck',
'ext': 'mp4',
'title': 'Amateur Finger Fuck',
'description': 'Amateur solo finger fucking.',
'thumbnail': 're:https?://.*\.jpg$',
'duration': 83,
'age_limit': 18,
'uploader': 'cwbike',
'categories': ['Amateur', 'Anal', 'Fisting', 'Home made', 'Solo'],
}
}, {
'url': 'http://www.empflix.com/videos/[AROMA][ARMD-718]-Aoi-Yoshino-Sawa-25826.html',
'only_matching': True,
}]
class MovieFapIE(TNAFlixNetworkBaseIE):
_VALID_URL = r'https?://(?:www\.)?moviefap\.com/videos/(?P<id>[0-9a-f]+)/(?P<display_id>[^/]+)\.html'
_VIEW_COUNT_REGEX = r'<br>Views\s*<strong>([\d,.]+)</strong>'
_COMMENT_COUNT_REGEX = r'<span[^>]+id="comCount"[^>]*>([\d,.]+)</span>'
_AVERAGE_RATING_REGEX = r'Current Rating\s*<br>\s*<strong>([\d.]+)</strong>'
_CATEGORIES_REGEX = r'(?s)<div[^>]+id="vid_info"[^>]*>\s*<div[^>]*>.+?</div>(.*?)<br>'
_TESTS = [{
# normal, multi-format video
'url': 'http://www.moviefap.com/videos/be9867c9416c19f54a4a/experienced-milf-amazing-handjob.html',
'md5': '26624b4e2523051b550067d547615906',
'info_dict': {
'id': 'be9867c9416c19f54a4a',
'display_id': 'experienced-milf-amazing-handjob',
'ext': 'mp4',
'title': 'Experienced MILF Amazing Handjob',
'description': 'Experienced MILF giving an Amazing Handjob',
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
'uploader': 'darvinfred06',
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['Amateur', 'Masturbation', 'Mature', 'Flashing'],
}
}, {
# quirky single-format case where the extension is given as fid, but the video is really an flv
'url': 'http://www.moviefap.com/videos/e5da0d3edce5404418f5/jeune-couple-russe.html',
'md5': 'fa56683e291fc80635907168a743c9ad',
'info_dict': {
'id': 'e5da0d3edce5404418f5',
'display_id': 'jeune-couple-russe',
'ext': 'flv',
'title': 'Jeune Couple Russe',
'description': 'Amateur',
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
'uploader': 'whiskeyjar',
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['Amateur', 'Teen'],
}
}]
| unlicense |
thomaskeck/root | tutorials/tmva/keras/MulticlassKeras.py | 5 | 2342 |
#!/usr/bin/env python
from ROOT import TMVA, TFile, TTree, TCut, gROOT
from os.path import isfile
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.regularizers import l2
from keras import initializations
from keras.optimizers import SGD
# Setup TMVA
TMVA.Tools.Instance()
TMVA.PyMethodBase.PyInitialize()
output = TFile.Open('TMVA.root', 'RECREATE')
factory = TMVA.Factory('TMVAClassification', output,
'!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=multiclass')
# Load data
if not isfile('tmva_example_multiple_background.root'):
createDataMacro = gROOT.GetTutorialsDir() + '/tmva/createData.C'
print(createDataMacro)
gROOT.ProcessLine('.L {}'.format(createDataMacro))
gROOT.ProcessLine('create_MultipleBackground(4000)')
data = TFile.Open('tmva_example_multiple_background.root')
signal = data.Get('TreeS')
background0 = data.Get('TreeB0')
background1 = data.Get('TreeB1')
background2 = data.Get('TreeB2')
dataloader = TMVA.DataLoader('dataset')
for branch in signal.GetListOfBranches():
dataloader.AddVariable(branch.GetName())
dataloader.AddTree(signal, 'Signal')
dataloader.AddTree(background0, 'Background_0')
dataloader.AddTree(background1, 'Background_1')
dataloader.AddTree(background2, 'Background_2')
dataloader.PrepareTrainingAndTestTree(TCut(''),
'SplitMode=Random:NormMode=NumEvents:!V')
# Generate model
# Define initialization
def normal(shape, name=None):
return initializations.normal(shape, scale=0.05, name=name)
# Define model
model = Sequential()
model.add(Dense(32, init=normal, activation='relu', W_regularizer=l2(1e-5), input_dim=4))
#model.add(Dense(32, init=normal, activation='relu', W_regularizer=l2(1e-5)))
model.add(Dense(4, init=normal, activation='softmax'))
# Set loss and optimizer
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01), metrics=['accuracy',])
# Store model to file
model.save('model.h5')
model.summary()
# Book methods
factory.BookMethod(dataloader, TMVA.Types.kFisher, 'Fisher',
'!H:!V:Fisher:VarTransform=D,G')
factory.BookMethod(dataloader, TMVA.Types.kPyKeras, "PyKeras",
'H:!V:VarTransform=D,G:FilenameModel=model.h5:NumEpochs=20:BatchSize=32')
# Run TMVA
factory.TrainAllMethods()
factory.TestAllMethods()
factory.EvaluateAllMethods()
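# The evaluation output is written to TMVA.root (opened above); ROOT's
# TMVA::TMVAGui("TMVA.root") can be used to browse it.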
| lgpl-2.1 |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/idlelib/undo.py | 7 | 11046 |
import string
from idlelib.delegator import Delegator
# tkinter import not needed because module does not create widgets,
# although many methods operate on text widget arguments.
#$ event <<redo>>
#$ win <Control-y>
#$ unix <Alt-z>
#$ event <<undo>>
#$ win <Control-z>
#$ unix <Control-z>
#$ event <<dump-undo-state>>
#$ win <Control-backslash>
#$ unix <Control-backslash>
class UndoDelegator(Delegator):
max_undo = 1000
def __init__(self):
Delegator.__init__(self)
self.reset_undo()
def setdelegate(self, delegate):
if self.delegate is not None:
self.unbind("<<undo>>")
self.unbind("<<redo>>")
self.unbind("<<dump-undo-state>>")
Delegator.setdelegate(self, delegate)
if delegate is not None:
self.bind("<<undo>>", self.undo_event)
self.bind("<<redo>>", self.redo_event)
self.bind("<<dump-undo-state>>", self.dump_event)
def dump_event(self, event):
from pprint import pprint
pprint(self.undolist[:self.pointer])
print("pointer:", self.pointer, end=' ')
print("saved:", self.saved, end=' ')
print("can_merge:", self.can_merge, end=' ')
print("get_saved():", self.get_saved())
pprint(self.undolist[self.pointer:])
return "break"
def reset_undo(self):
self.was_saved = -1
self.pointer = 0
self.undolist = []
self.undoblock = 0 # or a CommandSequence instance
self.set_saved(1)
def set_saved(self, flag):
if flag:
self.saved = self.pointer
else:
self.saved = -1
self.can_merge = False
self.check_saved()
def get_saved(self):
return self.saved == self.pointer
saved_change_hook = None
def set_saved_change_hook(self, hook):
self.saved_change_hook = hook
was_saved = -1
def check_saved(self):
is_saved = self.get_saved()
if is_saved != self.was_saved:
self.was_saved = is_saved
if self.saved_change_hook:
self.saved_change_hook()
def insert(self, index, chars, tags=None):
self.addcmd(InsertCommand(index, chars, tags))
def delete(self, index1, index2=None):
self.addcmd(DeleteCommand(index1, index2))
# Clients should call undo_block_start() and undo_block_stop()
# around a sequence of editing cmds to be treated as a unit by
# undo & redo. Nested matching calls are OK, and the inner calls
# then act like nops. OK too if no editing cmds, or only one
# editing cmd, is issued in between: if no cmds, the whole
# sequence has no effect; and if only one cmd, that cmd is entered
# directly into the undo list, as if undo_block_xxx hadn't been
# called. The intent of all that is to make this scheme easy
# to use: all the client has to worry about is making sure each
# _start() call is matched by a _stop() call.
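# A minimal usage sketch (hypothetical client code; assumes this delegator has
# been installed in a Percolator chain and that "text" routes edits through it):
#
#   undo.undo_block_start()
#   try:
#       text.delete('sel.first', 'sel.last')
#       text.insert('insert', replacement)
#   finally:
#       undo.undo_block_stop()
#
# A single <<undo>> event then reverts both edits as one unit.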
def undo_block_start(self):
if self.undoblock == 0:
self.undoblock = CommandSequence()
self.undoblock.bump_depth()
def undo_block_stop(self):
if self.undoblock.bump_depth(-1) == 0:
cmd = self.undoblock
self.undoblock = 0
if len(cmd) > 0:
if len(cmd) == 1:
# no need to wrap a single cmd
cmd = cmd.getcmd(0)
# this blk of cmds, or single cmd, has already
# been done, so don't execute it again
self.addcmd(cmd, 0)
def addcmd(self, cmd, execute=True):
if execute:
cmd.do(self.delegate)
if self.undoblock != 0:
self.undoblock.append(cmd)
return
if self.can_merge and self.pointer > 0:
lastcmd = self.undolist[self.pointer-1]
if lastcmd.merge(cmd):
return
self.undolist[self.pointer:] = [cmd]
if self.saved > self.pointer:
self.saved = -1
self.pointer = self.pointer + 1
if len(self.undolist) > self.max_undo:
##print "truncating undo list"
del self.undolist[0]
self.pointer = self.pointer - 1
if self.saved >= 0:
self.saved = self.saved - 1
self.can_merge = True
self.check_saved()
def undo_event(self, event):
if self.pointer == 0:
self.bell()
return "break"
cmd = self.undolist[self.pointer - 1]
cmd.undo(self.delegate)
self.pointer = self.pointer - 1
self.can_merge = False
self.check_saved()
return "break"
def redo_event(self, event):
if self.pointer >= len(self.undolist):
self.bell()
return "break"
cmd = self.undolist[self.pointer]
cmd.redo(self.delegate)
self.pointer = self.pointer + 1
self.can_merge = False
self.check_saved()
return "break"
class Command:
# Base class for Undoable commands
tags = None
def __init__(self, index1, index2, chars, tags=None):
self.marks_before = {}
self.marks_after = {}
self.index1 = index1
self.index2 = index2
self.chars = chars
if tags:
self.tags = tags
def __repr__(self):
s = self.__class__.__name__
t = (self.index1, self.index2, self.chars, self.tags)
if self.tags is None:
t = t[:-1]
return s + repr(t)
def do(self, text):
pass
def redo(self, text):
pass
def undo(self, text):
pass
def merge(self, cmd):
return 0
def save_marks(self, text):
marks = {}
for name in text.mark_names():
if name != "insert" and name != "current":
marks[name] = text.index(name)
return marks
def set_marks(self, text, marks):
for name, index in marks.items():
text.mark_set(name, index)
class InsertCommand(Command):
# Undoable insert command
def __init__(self, index1, chars, tags=None):
Command.__init__(self, index1, None, chars, tags)
def do(self, text):
self.marks_before = self.save_marks(text)
self.index1 = text.index(self.index1)
if text.compare(self.index1, ">", "end-1c"):
# Insert before the final newline
self.index1 = text.index("end-1c")
text.insert(self.index1, self.chars, self.tags)
self.index2 = text.index("%s+%dc" % (self.index1, len(self.chars)))
self.marks_after = self.save_marks(text)
##sys.__stderr__.write("do: %s\n" % self)
def redo(self, text):
text.mark_set('insert', self.index1)
text.insert(self.index1, self.chars, self.tags)
self.set_marks(text, self.marks_after)
text.see('insert')
##sys.__stderr__.write("redo: %s\n" % self)
def undo(self, text):
text.mark_set('insert', self.index1)
text.delete(self.index1, self.index2)
self.set_marks(text, self.marks_before)
text.see('insert')
##sys.__stderr__.write("undo: %s\n" % self)
def merge(self, cmd):
if self.__class__ is not cmd.__class__:
return False
if self.index2 != cmd.index1:
return False
if self.tags != cmd.tags:
return False
if len(cmd.chars) != 1:
return False
if self.chars and \
self.classify(self.chars[-1]) != self.classify(cmd.chars):
return False
self.index2 = cmd.index2
self.chars = self.chars + cmd.chars
return True
alphanumeric = string.ascii_letters + string.digits + "_"
def classify(self, c):
if c in self.alphanumeric:
return "alphanumeric"
if c == "\n":
return "newline"
return "punctuation"
class DeleteCommand(Command):
# Undoable delete command
def __init__(self, index1, index2=None):
Command.__init__(self, index1, index2, None, None)
def do(self, text):
self.marks_before = self.save_marks(text)
self.index1 = text.index(self.index1)
if self.index2:
self.index2 = text.index(self.index2)
else:
self.index2 = text.index(self.index1 + " +1c")
if text.compare(self.index2, ">", "end-1c"):
# Don't delete the final newline
self.index2 = text.index("end-1c")
self.chars = text.get(self.index1, self.index2)
text.delete(self.index1, self.index2)
self.marks_after = self.save_marks(text)
##sys.__stderr__.write("do: %s\n" % self)
def redo(self, text):
text.mark_set('insert', self.index1)
text.delete(self.index1, self.index2)
self.set_marks(text, self.marks_after)
text.see('insert')
##sys.__stderr__.write("redo: %s\n" % self)
def undo(self, text):
text.mark_set('insert', self.index1)
text.insert(self.index1, self.chars)
self.set_marks(text, self.marks_before)
text.see('insert')
##sys.__stderr__.write("undo: %s\n" % self)
class CommandSequence(Command):
# Wrapper for a sequence of undoable cmds to be undone/redone
# as a unit
def __init__(self):
self.cmds = []
self.depth = 0
def __repr__(self):
s = self.__class__.__name__
strs = []
for cmd in self.cmds:
strs.append(" %r" % (cmd,))
return s + "(\n" + ",\n".join(strs) + "\n)"
def __len__(self):
return len(self.cmds)
def append(self, cmd):
self.cmds.append(cmd)
def getcmd(self, i):
return self.cmds[i]
def redo(self, text):
for cmd in self.cmds:
cmd.redo(text)
def undo(self, text):
cmds = self.cmds[:]
cmds.reverse()
for cmd in cmds:
cmd.undo(text)
def bump_depth(self, incr=1):
self.depth = self.depth + incr
return self.depth
def _undo_delegator(parent): # htest #
from tkinter import Toplevel, Text, Button
from idlelib.percolator import Percolator
undowin = Toplevel(parent)
undowin.title("Test UndoDelegator")
x, y = map(int, parent.geometry().split('+')[1:])
undowin.geometry("+%d+%d" % (x, y + 175))
text = Text(undowin, height=10)
text.pack()
text.focus_set()
p = Percolator(text)
d = UndoDelegator()
p.insertfilter(d)
undo = Button(undowin, text="Undo", command=lambda:d.undo_event(None))
undo.pack(side='left')
redo = Button(undowin, text="Redo", command=lambda:d.redo_event(None))
redo.pack(side='left')
dump = Button(undowin, text="Dump", command=lambda:d.dump_event(None))
dump.pack(side='left')
if __name__ == "__main__":
import unittest
unittest.main('idlelib.idle_test.test_undo', verbosity=2, exit=False)
from idlelib.idle_test.htest import run
run(_undo_delegator)
| gpl-3.0 |
calebmadrigal/algorithms-in-python | heap.py | 1 | 3668 |
"""heap.py - implementation of a heap priority queue. """
__author__ = "Caleb Madrigal"
__date__ = "2015-02-17"
import math
from enum import Enum
from autoresizelist import AutoResizeList
class HeapType(Enum):
maxheap = 1
minheap = 2
class Heap:
def __init__(self, initial_data=None, heap_type=HeapType.maxheap):
self.heap_type = heap_type
if heap_type == HeapType.maxheap:
self.comparator = lambda x, y: x > y
else:
self.comparator = lambda x, y: x < y
self.data = AutoResizeList()
if initial_data is not None:
self.build_heap(initial_data)
self._size = len(self.data)
def _left_child(self, index):
return 2*index + 1
def _right_child(self, index):
return 2*index + 2
def _parent(self, index):
return math.floor((index - 1) / 2.0)
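# With this array layout, the node at index i has children at 2*i + 1 and
# 2*i + 2 and its parent at floor((i - 1) / 2); e.g. index 4 has children at
# indices 9 and 10 and its parent at index 1.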
def _is_root(self, index):
return index == 0
def _swap(self, i1, i2):
self.data[i1], self.data[i2] = self.data[i2], self.data[i1]
def build_heap(self, initial_data):
for i in initial_data:
self.data.prepend(i)
self.heap_down(0)
def heap_up(self, index):
# If we are at the root, return - we are done
if self._is_root(index):
return
# Else, compare the current node with the parent node, and if this node should be higher
# then the parent node, then swap and recursively call on the parent index
parent_index = self._parent(index)
if self.comparator(self.data[index], self.data[parent_index]):
self._swap(index, parent_index)
self.heap_up(parent_index)
def heap_down(self, index):
left_index = self._left_child(index)
right_index = self._right_child(index)
try:
left = self.data[left_index]
except IndexError:
left = None
try:
right = self.data[right_index]
except IndexError:
right = None
# Find the largest child
largest_child = left
largest_child_index = left_index
if left is not None and right is not None:
if self.comparator(right, left):
largest_child = right
largest_child_index = right_index
elif right is not None:
largest_child = right
largest_child_index = right_index
# If the largest child is not None and is higher priority than the current, then swap
# and recursively call on on the child index
if largest_child is not None and self.comparator(largest_child, self.data[index]):
self._swap(index, largest_child_index)
self.heap_down(largest_child_index)
def push(self, item):
insert_index = self._size # Insert at the end
self._size += 1
self.data[insert_index] = item
self.heap_up(insert_index)
return self
def peek(self):
return self.data[0]
def pop(self):
if len(self.data) < 1 or self.data[0] is None:
return None
# Take item from the root
item = self.data[0]
# Move the bottom-most, right-most item to the root
self.data[0] = self.data[self._size-1]
self.data[self._size-1] = None
self._size -= 1
self.heap_down(0)
return item
def size(self):
return self._size
def __repr__(self):
return str(self.data)
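# A minimal usage sketch (assumes autoresizelist is importable next to this module):
#
#   h = Heap([4, 1, 7], heap_type=HeapType.maxheap)
#   h.push(9)
#   h.peek()   # -> 9 (highest value first in a maxheap)
#   h.pop()    # -> 9, leaving 7 at the root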
if __name__ == "__main__":
import unittest
testsuite = unittest.TestLoader().discover('test', pattern="*heap*")
unittest.TextTestRunner(verbosity=1).run(testsuite)
| mit |
RafaelOrtiz/EbookReader | samples/myRead/epubview/navmap.py | 2 | 3449 |
from lxml import etree
import gtk
import logging
logging.basicConfig(level=logging.DEBUG)
_log = logging.getLogger('navmap')
class NavPoint(object):
def __init__(self, label, contentsrc, children=[]):
self._label = label
self._contentsrc = contentsrc
self._children = children
def get_label(self):
return self._label
def get_contentsrc(self):
return self._contentsrc
def get_children(self):
return self._children
class NavMap(object):
def __init__(self, opffile, ncxfile, basepath):
_log.debug('opffile: %s, ncxfile: %s, basepath: %s', opffile, ncxfile, basepath)
self._basepath = basepath
self._opffile = opffile
self._tree = etree.parse(ncxfile)
_log.debug('##################################')
_log.debug('XML: %s', etree.tostring(self._tree))
_log.debug('##################################')
self._root = self._tree.getroot()
self._gtktreestore = gtk.TreeStore(str, str)
self._flattoc = []
self._populate_flattoc()
self._populate_toc()
def _populate_flattoc(self):
tree = etree.parse(self._opffile)
root = tree.getroot()
itemmap = {}
manifest = root.find('.//{http://www.idpf.org/2007/opf}manifest')
for element in manifest.iterfind('{http://www.idpf.org/2007/opf}item'):
itemmap[element.get('id')] = element
spine = root.find('.//{http://www.idpf.org/2007/opf}spine')
for element in spine.iterfind('{http://www.idpf.org/2007/opf}itemref'):
idref = element.get('idref')
href = itemmap[idref].get('href')
self._flattoc.append(self._basepath + href)
self._opffile.close()
def _populate_toc(self):
navmap = self._root.find(
'{http://www.daisy.org/z3986/2005/ncx/}navMap')
for navpoint in navmap.iterfind(
'./{http://www.daisy.org/z3986/2005/ncx/}navPoint'):
self._process_navpoint(navpoint)
def _gettitle(self, navpoint):
text = navpoint.find('./{http://www.daisy.org/z3986/2005/ncx/}' +
'navLabel/{http://www.daisy.org/z3986/2005/ncx/}text')
return text.text
def _getcontent(self, navpoint):
_log.debug('navpoint:%s, tag:%s', navpoint, navpoint.tag)
text = navpoint.find(
'./{http://www.daisy.org/z3986/2005/ncx/}content')
return self._basepath + text.get('src')
def _process_navpoint(self, navpoint, parent=None):
title = self._gettitle(navpoint)
content = self._getcontent(navpoint)
#print title, content
iter = self._gtktreestore.append(parent, [title, content])
#self._flattoc.append((title, content))
childnavpointlist = list(navpoint.iterfind(
'./{http://www.daisy.org/z3986/2005/ncx/}navPoint'))
if len(childnavpointlist):
for childnavpoint in childnavpointlist:
self._process_navpoint(childnavpoint, parent=iter)
else:
return
def get_gtktreestore(self):
'''
Returns a GtkTreeModel representation of the
Epub table of contents
'''
return self._gtktreestore
def get_flattoc(self):
'''
Returns a flat (linear) list of files to be
rendered.
'''
return self._flattoc
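# A minimal usage sketch (hypothetical paths; both files come from an unpacked epub):
#
#   navmap = NavMap(open('OPS/content.opf'), open('OPS/toc.ncx'), 'OPS/')
#   treestore = navmap.get_gtktreestore()   # gtk.TreeStore of (title, content src)
#   flat_files = navmap.get_flattoc()       # spine files in reading order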
#t = TocParser('/home/sayamindu/Desktop/Test/OPS/fb.ncx')
| gpl-2.0 |
garbled1/ansible | lib/ansible/utils/module_docs_fragments/eos.py | 87 | 5493 |
#
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
default: none
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device. This value applies to either I(cli) or I(eapi). The port
value will default to the appropriate transport common port if
none is provided in the task. (cli=22, http=80, https=443).
default: 0 (use common port)
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
either the CLI login or the eAPI authentication depending on which
transport is used. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This is a common argument used for either I(cli)
or I(eapi) transports. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH keyfile to use to authenticate the connection to
the remote device. This argument is only used for I(cli) transports.
If the value is not specified in the task, the value of environment
variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
default: none
transport:
description:
- Configures the transport connection to use when connecting to the
remote device.
required: true
choices:
- eapi
- cli
default: cli
use_ssl:
description:
- Configures the I(transport) to use SSL if set to true only when the
C(transport=eapi). If the transport
argument is not eapi, this value is ignored.
default: yes
choices: ['yes', 'no']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates. If the transport
argument is not eapi, this value is ignored.
choices: ['yes', 'no']
"""
| gpl-3.0 |
mrquim/mrquimrepo | repo/script.module.exodus/lib/resources/lib/indexers/movies.py | 5 | 48031 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.modules import trakt
from resources.lib.modules import cleangenre
from resources.lib.modules import cleantitle
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import metacache
from resources.lib.modules import playcount
from resources.lib.modules import workers
from resources.lib.modules import views
from resources.lib.modules import utils
from resources.lib.indexers import navigator
import os,sys,re,json,urllib,urlparse,datetime
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?',''))) if len(sys.argv) > 1 else dict()
action = params.get('action')
control.moderator()
class movies:
def __init__(self):
self.list = []
self.imdb_link = 'http://www.imdb.com'
self.trakt_link = 'http://api.trakt.tv'
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.year_date = (self.datetime - datetime.timedelta(days = 365)).strftime('%Y-%m-%d')
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.trakt_user = control.setting('trakt.user').strip()
self.imdb_user = control.setting('imdb.user').replace('ur', '')
self.tm_user = control.setting('tm.user')
self.fanart_tv_user = control.setting('fanart.tv.user')
self.user = str(control.setting('fanart.tv.user')) + str(control.setting('tm.user'))
self.lang = control.apiLanguage()['trakt']
self.hidecinema = control.setting('hidecinema')
self.search_link = 'http://api.trakt.tv/search/movie?limit=20&page=1&query='
self.fanart_tv_art_link = 'http://webservice.fanart.tv/v3/movies/%s'
self.fanart_tv_level_link = 'http://webservice.fanart.tv/v3/level'
self.tm_art_link = 'http://api.themoviedb.org/3/movie/%s/images?api_key=%s&language=en-US&include_image_language=en,%s,null' % ('%s', self.tm_user, self.lang)
self.tm_img_link = 'https://image.tmdb.org/t/p/w%s%s'
self.persons_link = 'http://www.imdb.com/search/name?count=100&name='
self.personlist_link = 'http://www.imdb.com/search/name?count=100&gender=male,female'
self.person_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&role=%s&sort=year,desc&count=40&start=1'
self.keyword_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie,documentary&num_votes=100,&release_date=,date[0]&keywords=%s&sort=moviemeter,asc&count=40&start=1'
self.oscars_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&groups=oscar_best_picture_winners&sort=year,desc&count=40&start=1'
self.theaters_link = 'http://www.imdb.com/search/title?title_type=feature&num_votes=1000,&release_date=date[365],date[0]&sort=release_date_us,desc&count=40&start=1'
self.year_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&year=%s,%s&sort=moviemeter,asc&count=40&start=1'
if self.hidecinema == 'true':
self.popular_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&groups=top_1000&release_date=,date[90]&sort=moviemeter,asc&count=40&start=1'
self.views_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&sort=num_votes,desc&release_date=,date[90]&count=40&start=1'
self.featured_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&release_date=date[365],date[90]&sort=moviemeter,asc&count=40&start=1'
self.genre_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie,documentary&num_votes=100,&release_date=,date[90]&genres=%s&sort=moviemeter,asc&count=40&start=1'
self.language_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&primary_language=%s&sort=moviemeter,asc&release_date=,date[90]&count=40&start=1'
self.certification_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&certificates=us:%s&sort=moviemeter,asc&release_date=,date[90]&count=40&start=1'
self.boxoffice_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&sort=boxoffice_gross_us,desc&release_date=,date[90]&count=40&start=1'
else:
self.popular_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&groups=top_1000&sort=moviemeter,asc&count=40&start=1'
self.views_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&sort=num_votes,desc&count=40&start=1'
self.featured_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&release_date=date[365],date[60]&sort=moviemeter,asc&count=40&start=1'
self.genre_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie,documentary&num_votes=100,&release_date=,date[0]&genres=%s&sort=moviemeter,asc&count=40&start=1'
self.language_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&primary_language=%s&sort=moviemeter,asc&count=40&start=1'
self.certification_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&certificates=us:%s&sort=moviemeter,asc&count=40&start=1'
self.boxoffice_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&sort=boxoffice_gross_us,desc&count=40&start=1'
self.added_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=500,&production_status=released&release_date=%s,%s&sort=release_date,desc&count=20&start=1' % (self.year_date, self.today_date)
self.trending_link = 'http://api.trakt.tv/movies/trending?limit=40&page=1'
self.traktlists_link = 'http://api.trakt.tv/users/me/lists'
self.traktlikedlists_link = 'http://api.trakt.tv/users/likes/lists?limit=1000000'
self.traktlist_link = 'http://api.trakt.tv/users/%s/lists/%s/items'
self.traktcollection_link = 'http://api.trakt.tv/users/me/collection/movies'
self.traktwatchlist_link = 'http://api.trakt.tv/users/me/watchlist/movies'
self.traktfeatured_link = 'http://api.trakt.tv/recommendations/movies?limit=40'
self.trakthistory_link = 'http://api.trakt.tv/users/me/history/movies?limit=40&page=1'
self.imdblists_link = 'http://www.imdb.com/user/ur%s/lists?tab=all&sort=modified:desc&filter=titles' % self.imdb_user
self.imdblist_link = 'http://www.imdb.com/list/%s/?view=detail&sort=title:asc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
self.imdblist2_link = 'http://www.imdb.com/list/%s/?view=detail&sort=created:desc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
self.imdbwatchlist_link = 'http://www.imdb.com/user/ur%s/watchlist?sort=alpha,asc' % self.imdb_user
self.imdbwatchlist2_link = 'http://www.imdb.com/user/ur%s/watchlist?sort=date_added,desc' % self.imdb_user
def get(self, url, idx=True, create_directory=True):
try:
try: url = getattr(self, url + '_link')
except: pass
try: u = urlparse.urlparse(url).netloc.lower()
except: pass
if u in self.trakt_link and '/users/' in url:
try:
if url == self.trakthistory_link: raise Exception()
if not '/users/me/' in url: raise Exception()
if trakt.getActivity() > cache.timeout(self.trakt_list, url, self.trakt_user): raise Exception()
self.list = cache.get(self.trakt_list, 720, url, self.trakt_user)
except:
self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
if '/users/me/' in url and '/collection/' in url:
self.list = sorted(self.list, key=lambda k: utils.title_key(k['title']))
if idx == True: self.worker()
elif u in self.trakt_link and self.search_link in url:
self.list = cache.get(self.trakt_list, 1, url, self.trakt_user)
if idx == True: self.worker(level=0)
elif u in self.trakt_link:
self.list = cache.get(self.trakt_list, 24, url, self.trakt_user)
if idx == True: self.worker()
elif u in self.imdb_link and ('/user/' in url or '/list/' in url):
self.list = cache.get(self.imdb_list, 0, url)
if idx == True: self.worker()
elif u in self.imdb_link:
self.list = cache.get(self.imdb_list, 24, url)
if idx == True: self.worker()
if idx == True and create_directory == True: self.movieDirectory(self.list)
return self.list
except:
pass
def widget(self):
setting = control.setting('movie.widget')
if setting == '2':
self.get(self.trending_link)
elif setting == '3':
self.get(self.popular_link)
elif setting == '4':
self.get(self.theaters_link)
elif setting == '5':
self.get(self.added_link)
else:
self.get(self.featured_link)
def search(self):
navigator.navigator().addDirectoryItem(32603, 'movieSearchnew', 'search.png', 'DefaultMovies.png')
try: from sqlite3 import dbapi2 as database
except: from pysqlite2 import dbapi2 as database
dbcon = database.connect(control.searchFile)
dbcur = dbcon.cursor()
try:
dbcur.executescript("CREATE TABLE IF NOT EXISTS movies (ID Integer PRIMARY KEY AUTOINCREMENT, term);")
except:
pass
dbcur.execute("SELECT * FROM movies ORDER BY ID DESC")
lst = []
delete_option = False
for (id,term) in dbcur.fetchall():
if term not in str(lst):
delete_option = True
navigator.navigator().addDirectoryItem(term, 'movieSearchterm&name=%s' % term, 'search.png', 'DefaultMovies.png')
lst += [(term)]
dbcur.close()
if delete_option:
navigator.navigator().addDirectoryItem(32605, 'clearCacheSearch', 'tools.png', 'DefaultAddonProgram.png')
navigator.navigator().endDirectory()
def search_new(self):
control.idle()
t = control.lang(32010).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
q = k.getText() if k.isConfirmed() else None
if (q == None or q == ''): return
try: from sqlite3 import dbapi2 as database
except: from pysqlite2 import dbapi2 as database
dbcon = database.connect(control.searchFile)
dbcur = dbcon.cursor()
dbcur.execute("INSERT INTO movies VALUES (?,?)", (None,q))
dbcon.commit()
dbcur.close()
url = self.search_link + urllib.quote_plus(q)
url = '%s?action=moviePage&url=%s' % (sys.argv[0], urllib.quote_plus(url))
control.execute('Container.Update(%s)' % url)
def search_term(self, name):
control.idle()
url = self.search_link + urllib.quote_plus(name)
url = '%s?action=moviePage&url=%s' % (sys.argv[0], urllib.quote_plus(url))
control.execute('Container.Update(%s)' % url)
def person(self):
try:
control.idle()
t = control.lang(32010).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
q = k.getText() if k.isConfirmed() else None
if (q == None or q == ''): return
url = self.persons_link + urllib.quote_plus(q)
url = '%s?action=moviePersons&url=%s' % (sys.argv[0], urllib.quote_plus(url))
control.execute('Container.Update(%s)' % url)
except:
return
def genres(self):
genres = [
('Action', 'action', True),
('Adventure', 'adventure', True),
('Animation', 'animation', True),
('Anime', 'anime', False),
('Biography', 'biography', True),
('Comedy', 'comedy', True),
('Crime', 'crime', True),
('Documentary', 'documentary', True),
('Drama', 'drama', True),
('Family', 'family', True),
('Fantasy', 'fantasy', True),
('History', 'history', True),
('Horror', 'horror', True),
('Music ', 'music', True),
('Musical', 'musical', True),
('Mystery', 'mystery', True),
('Romance', 'romance', True),
('Science Fiction', 'sci_fi', True),
('Sport', 'sport', True),
('Thriller', 'thriller', True),
('War', 'war', True),
('Western', 'western', True)
]
for i in genres: self.list.append(
{
'name': cleangenre.lang(i[0], self.lang),
'url': self.genre_link % i[1] if i[2] else self.keyword_link % i[1],
'image': 'genres.png',
'action': 'movies'
})
self.addDirectory(self.list)
return self.list
def languages(self):
languages = [
('Arabic', 'ar'),
('Bosnian', 'bs'),
('Bulgarian', 'bg'),
('Chinese', 'zh'),
('Croatian', 'hr'),
('Dutch', 'nl'),
('English', 'en'),
('Finnish', 'fi'),
('French', 'fr'),
('German', 'de'),
('Greek', 'el'),
('Hebrew', 'he'),
('Hindi ', 'hi'),
('Hungarian', 'hu'),
('Icelandic', 'is'),
('Italian', 'it'),
('Japanese', 'ja'),
('Korean', 'ko'),
('Macedonian', 'mk'),
('Norwegian', 'no'),
('Persian', 'fa'),
('Polish', 'pl'),
('Portuguese', 'pt'),
('Punjabi', 'pa'),
('Romanian', 'ro'),
('Russian', 'ru'),
('Serbian', 'sr'),
('Slovenian', 'sl'),
('Spanish', 'es'),
('Swedish', 'sv'),
('Turkish', 'tr'),
('Ukrainian', 'uk')
]
for i in languages: self.list.append({'name': str(i[0]), 'url': self.language_link % i[1], 'image': 'languages.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def certifications(self):
certificates = ['G', 'PG', 'PG-13', 'R', 'NC-17']
for i in certificates: self.list.append({'name': str(i), 'url': self.certification_link % str(i).replace('-', '_').lower(), 'image': 'certificates.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def years(self):
year = (self.datetime.strftime('%Y'))
for i in range(int(year)-0, 1900, -1): self.list.append({'name': str(i), 'url': self.year_link % (str(i), str(i)), 'image': 'years.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def persons(self, url):
if url == None:
self.list = cache.get(self.imdb_person_list, 24, self.personlist_link)
else:
self.list = cache.get(self.imdb_person_list, 1, url)
for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
self.addDirectory(self.list)
return self.list
def userlists(self):
try:
userlists = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
activity = trakt.getActivity()
except:
pass
try:
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
except:
pass
try:
self.list = []
if self.imdb_user == '': raise Exception()
userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link)
except:
pass
try:
self.list = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
except:
pass
self.list = userlists
for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'movies'})
self.addDirectory(self.list, queue=True)
return self.list
def trakt_list(self, url, user):
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
q.update({'extended': 'full'})
q = (urllib.urlencode(q)).replace('%2C', ',')
u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
result = trakt.getTraktAsJson(u)
items = []
for i in result:
try: items.append(i['movie'])
except: pass
if len(items) == 0:
items = result
except:
return
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
if not int(q['limit']) == len(items): raise Exception()
q.update({'page': str(int(q['page']) + 1)})
q = (urllib.urlencode(q)).replace('%2C', ',')
next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = item['title']
title = client.replaceHTMLCodes(title)
year = item['year']
year = re.sub('[^0-9]', '', str(year))
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = item['ids']['imdb']
if imdb == None or imdb == '': raise Exception()
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
tmdb = str(item.get('ids', {}).get('tmdb', 0))
try: premiered = item['released']
except: premiered = '0'
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
try: genre = item['genres']
except: genre = '0'
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
try: duration = str(item['runtime'])
except: duration = '0'
if duration == None: duration = '0'
try: rating = str(item['rating'])
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
try: votes = str(item['votes'])
except: votes = '0'
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None: votes = '0'
try: mpaa = item['certification']
except: mpaa = '0'
if mpaa == None: mpaa = '0'
try: plot = item['overview']
except: plot = '0'
if plot == None: plot = '0'
plot = client.replaceHTMLCodes(plot)
try: tagline = item['tagline']
except: tagline = '0'
if tagline == None: tagline = '0'
tagline = client.replaceHTMLCodes(tagline)
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'plot': plot, 'tagline': tagline, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'poster': '0', 'next': next})
except:
pass
return self.list
def trakt_user_list(self, url, user):
try:
items = trakt.getTraktAsJson(url)
except:
pass
for item in items:
try:
try: name = item['list']['name']
except: name = item['name']
name = client.replaceHTMLCodes(name)
try: url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug'])
except: url = ('me', item['ids']['slug'])
url = self.traktlist_link % url
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: utils.title_key(k['name']))
return self.list
def imdb_list(self, url):
try:
for i in re.findall('date\[(\d+)\]', url):
url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))
def imdb_watchlist_id(url):
return client.parseDOM(client.request(url), 'meta', ret='content', attrs = {'property': 'pageId'})[0]
if url == self.imdbwatchlist_link:
url = cache.get(imdb_watchlist_id, 8640, url)
url = self.imdblist_link % url
elif url == self.imdbwatchlist2_link:
url = cache.get(imdb_watchlist_id, 8640, url)
url = self.imdblist2_link % url
result = client.request(url)
result = result.replace('\n', ' ')
items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item mode-advanced'})
items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
except:
return
try:
next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})
if len(next) == 0:
next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
next = [i[0] for i in next if 'Next' in i[1]]
next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
next = client.replaceHTMLCodes(next)
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = client.parseDOM(item, 'a')[1]
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'})
try: year = re.compile('(\d{4})').findall(year)[0]
except: year = '0'
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = client.parseDOM(item, 'a', ret='href')[0]
imdb = re.findall('(tt\d*)', imdb)[0]
imdb = imdb.encode('utf-8')
try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
except: poster = '0'
if '/nopicture/' in poster: poster = '0'
poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
except: genre = '0'
genre = ' / '.join([i.strip() for i in genre.split(',')])
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = re.findall('(\d+?) min(?:s|)', item)[-1]
except: duration = '0'
duration = duration.encode('utf-8')
rating = '0'
try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
except: pass
try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
except: rating = '0'
try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
except: pass
if rating == '' or rating == '-': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
except: votes = '0'
try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
except: mpaa = '0'
if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
mpaa = mpaa.replace('_', '-')
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
except: director = '0'
director = client.parseDOM(director, 'a')
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: cast = re.findall('Stars(?:s|):(.+?)(?:\||</div>)', item)[0]
except: cast = '0'
cast = client.replaceHTMLCodes(cast)
cast = cast.encode('utf-8')
cast = client.parseDOM(cast, 'a')
if cast == []: cast = '0'
plot = '0'
try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
except: pass
try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
except: pass
plot = plot.rsplit('<span>', 1)[0].strip()
plot = re.sub('<.+?>|</.+?>', '', plot)
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'cast': cast, 'plot': plot, 'tagline': '0', 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'poster': poster, 'next': next})
except:
pass
return self.list
def imdb_person_list(self, url):
try:
result = client.request(url)
items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'})
except:
return
for item in items:
try:
name = client.parseDOM(item, 'a', ret='title')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = re.findall('(nm\d*)', url, re.I)[0]
url = self.person_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
image = client.parseDOM(item, 'img', ret='src')[0]
if not ('._SX' in image or '._SY' in image): raise Exception()
image = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', image)
image = client.replaceHTMLCodes(image)
image = image.encode('utf-8')
self.list.append({'name': name, 'url': url, 'image': image})
except:
pass
return self.list
def imdb_user_list(self, url):
try:
result = client.request(url)
items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
except:
pass
for item in items:
try:
name = client.parseDOM(item, 'a')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = url.split('/list/', 1)[-1].replace('/', '')
url = self.imdblist_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: utils.title_key(k['name']))
return self.list
def worker(self, level=1):
self.meta = []
total = len(self.list)
self.fanart_tv_headers = {'api-key': 'NDZkZmMyN2M1MmE0YTc3MjY3NWQ4ZTMyYjdiY2E2OGU='.decode('base64')}
if not self.fanart_tv_user == '':
self.fanart_tv_headers.update({'client-key': self.fanart_tv_user})
for i in range(0, total): self.list[i].update({'metacache': False})
self.list = metacache.fetch(self.list, self.lang, self.user)
for r in range(0, total, 40):
threads = []
for i in range(r, r+40):
if i <= total: threads.append(workers.Thread(self.super_info, i))
[i.start() for i in threads]
[i.join() for i in threads]
if self.meta: metacache.insert(self.meta)
self.list = [i for i in self.list if not i['imdb'] == '0']
self.list = metacache.local(self.list, self.tm_img_link, 'poster3', 'fanart2')
if self.fanart_tv_user == '':
for i in self.list: i.update({'clearlogo': '0', 'clearart': '0'})
def super_info(self, i):
try:
if self.list[i]['metacache'] == True: raise Exception()
imdb = self.list[i]['imdb']
item = trakt.getMovieSummary(imdb)
title = item.get('title')
title = client.replaceHTMLCodes(title)
originaltitle = title
year = item.get('year', 0)
year = re.sub('[^0-9]', '', str(year))
imdb = item.get('ids', {}).get('imdb', '0')
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
tmdb = str(item.get('ids', {}).get('tmdb', 0))
premiered = item.get('released', '0')
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
genre = item.get('genres', [])
genre = [x.title() for x in genre]
genre = ' / '.join(genre).strip()
if not genre: genre = '0'
duration = str(item.get('runtime', 0))
rating = item.get('rating', '0')
if not rating or rating == '0.0': rating = '0'
votes = item.get('votes', '0')
try: votes = str(format(int(votes), ',d'))
except: pass
mpaa = item.get('certification', '0')
if not mpaa: mpaa = '0'
tagline = item.get('tagline', '0')
plot = item.get('overview', '0')
people = trakt.getPeople(imdb, 'movies')
director = writer = ''
if 'crew' in people and 'directing' in people['crew']:
director = ', '.join([director['person']['name'] for director in people['crew']['directing'] if director['job'].lower() == 'director'])
if 'crew' in people and 'writing' in people['crew']:
writer = ', '.join([writer['person']['name'] for writer in people['crew']['writing'] if writer['job'].lower() in ['writer', 'screenplay', 'author']])
cast = []
for person in people.get('cast', []):
cast.append({'name': person['person']['name'], 'role': person['character']})
cast = [(person['name'], person['role']) for person in cast]
try:
if self.lang == 'en' or self.lang not in item.get('available_translations', [self.lang]): raise Exception()
trans_item = trakt.getMovieTranslation(imdb, self.lang, full=True)
title = trans_item.get('title') or title
tagline = trans_item.get('tagline') or tagline
plot = trans_item.get('overview') or plot
except:
pass
try:
artmeta = True
#if self.fanart_tv_user == '': raise Exception()
art = client.request(self.fanart_tv_art_link % imdb, headers=self.fanart_tv_headers, timeout='10', error=True)
try: art = json.loads(art)
except: artmeta = False
except:
pass
try:
poster2 = art['movieposter']
poster2 = [x for x in poster2 if x.get('lang') == self.lang][::-1] + [x for x in poster2 if x.get('lang') == 'en'][::-1] + [x for x in poster2 if x.get('lang') in ['00', '']][::-1]
poster2 = poster2[0]['url'].encode('utf-8')
except:
poster2 = '0'
try:
if 'moviebackground' in art: fanart = art['moviebackground']
else: fanart = art['moviethumb']
fanart = [x for x in fanart if x.get('lang') == self.lang][::-1] + [x for x in fanart if x.get('lang') == 'en'][::-1] + [x for x in fanart if x.get('lang') in ['00', '']][::-1]
fanart = fanart[0]['url'].encode('utf-8')
except:
fanart = '0'
try:
banner = art['moviebanner']
banner = [x for x in banner if x.get('lang') == self.lang][::-1] + [x for x in banner if x.get('lang') == 'en'][::-1] + [x for x in banner if x.get('lang') in ['00', '']][::-1]
banner = banner[0]['url'].encode('utf-8')
except:
banner = '0'
try:
if 'hdmovielogo' in art: clearlogo = art['hdmovielogo']
else: clearlogo = art['clearlogo']
clearlogo = [x for x in clearlogo if x.get('lang') == self.lang][::-1] + [x for x in clearlogo if x.get('lang') == 'en'][::-1] + [x for x in clearlogo if x.get('lang') in ['00', '']][::-1]
clearlogo = clearlogo[0]['url'].encode('utf-8')
except:
clearlogo = '0'
try:
if 'hdmovieclearart' in art: clearart = art['hdmovieclearart']
else: clearart = art['clearart']
clearart = [x for x in clearart if x.get('lang') == self.lang][::-1] + [x for x in clearart if x.get('lang') == 'en'][::-1] + [x for x in clearart if x.get('lang') in ['00', '']][::-1]
clearart = clearart[0]['url'].encode('utf-8')
except:
clearart = '0'
try:
if self.tm_user == '': raise Exception()
art2 = client.request(self.tm_art_link % imdb, timeout='10', error=True)
art2 = json.loads(art2)
except:
pass
try:
poster3 = art2['posters']
poster3 = [x for x in poster3 if x.get('iso_639_1') == self.lang] + [x for x in poster3 if x.get('iso_639_1') == 'en'] + [x for x in poster3 if x.get('iso_639_1') not in [self.lang, 'en']]
poster3 = [(x['width'], x['file_path']) for x in poster3]
poster3 = [(x[0], x[1]) if x[0] < 300 else ('300', x[1]) for x in poster3]
poster3 = self.tm_img_link % poster3[0]
poster3 = poster3.encode('utf-8')
except:
poster3 = '0'
try:
fanart2 = art2['backdrops']
fanart2 = [x for x in fanart2 if x.get('iso_639_1') == self.lang] + [x for x in fanart2 if x.get('iso_639_1') == 'en'] + [x for x in fanart2 if x.get('iso_639_1') not in [self.lang, 'en']]
fanart2 = [x for x in fanart2 if x.get('width') == 1920] + [x for x in fanart2 if x.get('width') < 1920]
fanart2 = [(x['width'], x['file_path']) for x in fanart2]
fanart2 = [(x[0], x[1]) if x[0] < 1280 else ('1280', x[1]) for x in fanart2]
fanart2 = self.tm_img_link % fanart2[0]
fanart2 = fanart2.encode('utf-8')
except:
fanart2 = '0'
item = {'title': title, 'originaltitle': originaltitle, 'year': year, 'imdb': imdb, 'tmdb': tmdb, 'poster': '0', 'poster2': poster2, 'poster3': poster3, 'banner': banner, 'fanart': fanart, 'fanart2': fanart2, 'clearlogo': clearlogo, 'clearart': clearart, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline}
item = dict((k,v) for k, v in item.iteritems() if not v == '0')
self.list[i].update(item)
if artmeta == False: raise Exception()
meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.lang, 'user': self.user, 'item': item}
self.meta.append(meta)
except:
pass
def movieDirectory(self, items):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
traktCredentials = trakt.getTraktCredentialsInfo()
try: isOld = False ; control.item().getArt('type')
except: isOld = True
isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false'
indicators = playcount.getMovieIndicators(refresh=True) if action == 'movies' else playcount.getMovieIndicators()
playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8')
watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
traktManagerMenu = control.lang(32070).encode('utf-8')
nextMenu = control.lang(32053).encode('utf-8')
addToLibrary = control.lang(32551).encode('utf-8')
for i in items:
try:
label = '%s (%s)' % (i['title'], i['year'])
imdb, tmdb, title, year = i['imdb'], i['tmdb'], i['originaltitle'], i['year']
sysname = urllib.quote_plus('%s (%s)' % (title, year))
systitle = urllib.quote_plus(title)
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'code': imdb, 'imdbnumber': imdb, 'imdb_id': imdb})
meta.update({'tmdb_id': tmdb})
meta.update({'mediatype': 'movie'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, urllib.quote_plus(label))})
#meta.update({'trailer': 'plugin://script.extendedinfo/?info=playtrailer&&id=%s' % imdb})
if not 'duration' in i: meta.update({'duration': '120'})
elif i['duration'] == '0': meta.update({'duration': '120'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
poster = [i[x] for x in ['poster3', 'poster', 'poster2'] if i.get(x, '0') != '0']
poster = poster[0] if poster else addonPoster
meta.update({'poster': poster})
sysmeta = urllib.quote_plus(json.dumps(meta))
url = '%s?action=play&title=%s&year=%s&imdb=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, sysmeta, self.systime)
sysurl = urllib.quote_plus(url)
path = '%s?action=play&title=%s&year=%s&imdb=%s' % (sysaddon, systitle, year, imdb)
cm = []
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
try:
overlay = int(playcount.getMovieOverlay(indicators, imdb))
if overlay == 7:
cm.append((unwatchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=6)' % (sysaddon, imdb)))
meta.update({'playcount': 1, 'overlay': 7})
else:
cm.append((watchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=7)' % (sysaddon, imdb)))
meta.update({'playcount': 0, 'overlay': 6})
except:
pass
if traktCredentials == True:
cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&imdb=%s&content=movie)' % (sysaddon, sysname, imdb)))
cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
if isOld == True:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
cm.append((addToLibrary, 'RunPlugin(%s?action=movieToLibrary&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s)' % (sysaddon, sysname, systitle, year, imdb, tmdb)))
item = control.item(label=label)
art = {}
art.update({'icon': poster, 'thumb': poster, 'poster': poster})
if 'banner' in i and not i['banner'] == '0':
art.update({'banner': i['banner']})
else:
art.update({'banner': addonBanner})
if 'clearlogo' in i and not i['clearlogo'] == '0':
art.update({'clearlogo': i['clearlogo']})
if 'clearart' in i and not i['clearart'] == '0':
art.update({'clearart': i['clearart']})
if settingFanart == 'true' and 'fanart2' in i and not i['fanart2'] == '0':
item.setProperty('Fanart_Image', i['fanart2'])
elif settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
item.setProperty('Fanart_Image', i['fanart'])
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setArt(art)
item.addContextMenuItems(cm)
item.setProperty('IsPlayable', isPlayable)
item.setInfo(type='Video', infoLabels = meta)
video_streaminfo = {'codec': 'h264'}
item.addStreamInfo('video', video_streaminfo)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False)
except:
pass
try:
url = items[0]['next']
if url == '': raise Exception()
icon = control.addonNext()
url = '%s?action=moviePage&url=%s' % (sysaddon, urllib.quote_plus(url))
item = control.item(label=nextMenu)
item.setArt({'icon': icon, 'thumb': icon, 'poster': icon, 'banner': icon})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
control.content(syshandle, 'movies')
control.directory(syshandle, cacheToDisc=True)
views.setView('movies', {'skin.estuary': 55, 'skin.confluence': 500})
def addDirectory(self, items, queue=False):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonFanart, addonThumb, artPath = control.addonFanart(), control.addonThumb(), control.artPath()
queueMenu = control.lang(32065).encode('utf-8')
playRandom = control.lang(32535).encode('utf-8')
addToLibrary = control.lang(32551).encode('utf-8')
for i in items:
try:
name = i['name']
if i['image'].startswith('http'): thumb = i['image']
elif not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
cm = []
cm.append((playRandom, 'RunPlugin(%s?action=random&rtype=movie&url=%s)' % (sysaddon, urllib.quote_plus(i['url']))))
if queue == True:
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
try: cm.append((addToLibrary, 'RunPlugin(%s?action=moviesToLibrary&url=%s)' % (sysaddon, urllib.quote_plus(i['context']))))
except: pass
item = control.item(label=name)
item.setArt({'icon': thumb, 'thumb': thumb})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
item.addContextMenuItems(cm)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
control.content(syshandle, 'addons')
control.directory(syshandle, cacheToDisc=True)
|
gpl-2.0
|
eugenesan/postman
|
postman/FlickrWorker.py
|
1
|
2057
|
from BaseWorker import *
class FlickrWorker(BaseWorker):
key = '391fb6763fe0b5011cf52638067e0fed'
secret = '369f46a112452186'
def __init__(self, parent = None):
super(FlickrWorker, self).__init__(parent)
def run(self):
self.progressSignal.emit(self.stampConfig)
if not self.filesModel.count():
return
progressStep = 1.0 / self.filesModel.count()
flickrInstance = self.stampConfig['flickrInst']
for i in range(self.filesModel.count()):
self.status = 'Uploading: %s' % self.filesModel.filesList[i].filePath
self.statusSignal.emit(self.stampConfig)
imageFilename = self.filesModel.filesList[i].filePath.encode('UTF-8','ignore')
imageTitle = self.filesModel.filesList[i].title.encode('UTF-8','ignore')
imageDescription = self.filesModel.filesList[i].description.encode('UTF-8','ignore')
imageTags = self.filesModel.filesList[i].tags.encode('UTF-8','ignore')
# build space delimited tags string
tagList = ['"%s"' % tag.strip() for tag in imageTags.split(',')]
tagsString = ' '.join(tagList)
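# retry the upload up to self.retry times; only report failure after the last attempt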
for r in range(self.retry):
try:
flickrInstance.upload(filename=imageFilename, title=imageTitle, description=imageDescription, tags=tagsString)
break
except Exception:
if r == self.retry - 1:
self.status = 'Failed'
self.statusSignal.emit(self.stampConfig)
self.doneSignal.emit(self.stampConfig)
return
self.progress += progressStep
self.progressSignal.emit(self.stampConfig)
self.status = 'Done'
self.result = True
self.statusSignal.emit(self.stampConfig)
self.doneSignal.emit(self.stampConfig)
|
gpl-3.0
|
4tikhonov/eurogis
|
maps/bin/get_regions.py
|
4
|
1346
|
#!/usr/bin/python
import urllib2
import simplejson
import json
import sys
from shapely.geometry import shape, Polygon, MultiPolygon
#from shapely.geometry.multipolygon import shape
# Example of polygon
co1 = {"type": "Polygon", "coordinates": [
[(-102.05, 41.0),
(-102.05, 37.0),
(-109.05, 37.0),
(-109.05, 41.0)]]}
# default to Amsterdam code 10426 and year 1812 when arguments are missing
code = sys.argv[1] if len(sys.argv) > 1 else 10426
year = sys.argv[2] if len(sys.argv) > 2 else str(1812)
amscode= str(code)
jsondataurl = "http://node-128.dev.socialhistoryservices.org/api/maps?year=" + year + "&format=geojson"
req = urllib2.Request(jsondataurl)
opener = urllib2.build_opener()
f = opener.open(req)
datapolygons = simplejson.load(f)
def coordinates(polygons, amscode):
for key in polygons:
if key == 'features':
data = polygons[key]
for key in data:
response = json.dumps(key)
dict = json.loads(response)
for key in dict:
#print key['properties']
if key == 'properties':
maincode = str(dict[key]['amsterdamcode'])
if maincode == amscode:
co = dict['geometry']
return co
geometry = coordinates(datapolygons, amscode)
#geometry = co1
size = shape(geometry).area / 27878400
print size
print shape(geometry).type
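# Usage sketch (illustrative, not part of the original script): run with an
# Amsterdam code and an optional year, e.g.
#   python get_regions.py 10426 1812
# and the script prints the area and geometry type of the matching region
# polygon fetched from the maps API.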
|
gpl-3.0
|
emmericp/dpdk
|
app/test/autotest_runner.py
|
1
|
14419
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation
# The main logic behind running autotests in parallel
from __future__ import print_function
import StringIO
import csv
from multiprocessing import Pool, Queue
import pexpect
import re
import subprocess
import sys
import time
import glob
import os
# wait for prompt
def wait_prompt(child):
try:
child.sendline()
result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
timeout=120)
except:
return False
if result == 0:
return True
else:
return False
# get all valid NUMA nodes
def get_numa_nodes():
return [
int(
re.match(r"node(\d+)", os.path.basename(node))
.group(1)
)
for node in glob.glob("/sys/devices/system/node/node*")
]
# find first (or any, really) CPU on a particular node, will be used to spread
# processes around NUMA nodes to avoid exhausting memory on particular node
def first_cpu_on_node(node_nr):
cpu_path = glob.glob("/sys/devices/system/node/node%d/cpu*" % node_nr)[0]
cpu_name = os.path.basename(cpu_path)
m = re.match(r"cpu(\d+)", cpu_name)
return int(m.group(1))
pool_child = None # per-process child
# we initialize each worker with a queue because we need per-pool unique
# command-line arguments, but we cannot do different arguments in an initializer
# because the API doesn't allow per-worker initializer arguments. so, instead,
# we will initialize with a shared queue, and dequeue command-line arguments
# from this queue
def pool_init(queue, result_queue):
global pool_child
cmdline, prefix = queue.get()
start_time = time.time()
name = ("Start %s" % prefix) if prefix != "" else "Start"
# use default prefix if no prefix was specified
prefix_cmdline = "--file-prefix=%s" % prefix if prefix != "" else ""
# append prefix to cmdline
cmdline = "%s %s" % (cmdline, prefix_cmdline)
# prepare logging of init
startuplog = StringIO.StringIO()
# run test app
try:
print("\n%s %s\n" % ("=" * 20, prefix), file=startuplog)
print("\ncmdline=%s" % cmdline, file=startuplog)
pool_child = pexpect.spawn(cmdline, logfile=startuplog)
# wait for target to boot
if not wait_prompt(pool_child):
pool_child.close()
result = tuple((-1,
"Fail [No prompt]",
name,
time.time() - start_time,
startuplog.getvalue(),
None))
pool_child = None
else:
result = tuple((0,
"Success",
name,
time.time() - start_time,
startuplog.getvalue(),
None))
except:
result = tuple((-1,
"Fail [Can't run]",
name,
time.time() - start_time,
startuplog.getvalue(),
None))
pool_child = None
result_queue.put(result)
# run a test
# each result tuple in results list consists of:
# result value (0 or -1)
# result string
# test name
# total test run time (double)
# raw test log
# test report (if not available, should be None)
#
# this function needs to be outside AutotestRunner class because otherwise Pool
# won't work (or rather it will require quite a bit of effort to make it work).
def run_test(target, test):
global pool_child
if pool_child is None:
return -1, "Fail [No test process]", test["Name"], 0, "", None
# create log buffer for each test
# in multiprocessing environment, the logging would be
# interleaved and will create a mess, hence the buffering
logfile = StringIO.StringIO()
pool_child.logfile = logfile
# make a note when the test started
start_time = time.time()
try:
# print test name to log buffer
print("\n%s %s\n" % ("-" * 20, test["Name"]), file=logfile)
# run test function associated with the test
result = test["Func"](pool_child, test["Command"])
# make a note when the test was finished
end_time = time.time()
log = logfile.getvalue()
# append test data to the result tuple
result += (test["Name"], end_time - start_time, log)
# call report function, if any defined, and supply it with
# target and complete log for test run
if test["Report"]:
report = test["Report"](target, log)
# append report to results tuple
result += (report,)
else:
# report is None
result += (None,)
except:
# make a note when the test crashed
end_time = time.time()
# mark test as failed
result = (-1, "Fail [Crash]", test["Name"],
end_time - start_time, logfile.getvalue(), None)
# return test results
return result
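# Illustrative note (the example values below are hypothetical, not taken from
# this file): each test dict handed to run_test() is expected to look roughly
# like
#   {"Name": "Ring autotest", "Command": "ring_autotest",
#    "Func": some_test_function, "Report": None}
# where "Func" drives the pexpect child and "Report", if set, is a callable
# that builds a report from the target name and the captured log.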
# class representing an instance of autotests run
class AutotestRunner:
cmdline = ""
parallel_test_groups = []
non_parallel_test_groups = []
logfile = None
csvwriter = None
target = ""
start = None
n_tests = 0
fails = 0
log_buffers = []
blacklist = []
whitelist = []
def __init__(self, cmdline, target, blacklist, whitelist, n_processes):
self.cmdline = cmdline
self.target = target
self.blacklist = blacklist
self.whitelist = whitelist
self.skipped = []
self.parallel_tests = []
self.non_parallel_tests = []
self.n_processes = n_processes
self.active_processes = 0
# parse the binary for available test commands
binary = cmdline.split()[0]
stripped = 'not stripped' not in \
subprocess.check_output(['file', binary])
if not stripped:
symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
self.avail_cmds = re.findall('test_register_(\w+)', symbols)
else:
self.avail_cmds = None
# log file filename
logfile = "%s.log" % target
csvfile = "%s.csv" % target
self.logfile = open(logfile, "w")
csvfile = open(csvfile, "w")
self.csvwriter = csv.writer(csvfile)
# prepare results table
self.csvwriter.writerow(["test_name", "test_result", "result_str"])
# set up cmdline string
def __get_cmdline(self, cpu_nr):
cmdline = ("taskset -c %i " % cpu_nr) + self.cmdline
return cmdline
def __process_result(self, result):
# unpack result tuple
test_result, result_str, test_name, \
test_time, log, report = result
# get total run time
cur_time = time.time()
total_time = int(cur_time - self.start)
# print results, test run time and total time since start
result = ("%s:" % test_name).ljust(30)
result += result_str.ljust(29)
result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
# don't print out total time every line, it's the same anyway
print(result + "[%02dm %02ds]" % (total_time / 60, total_time % 60))
# if test failed and it wasn't a "start" test
if test_result < 0:
self.fails += 1
# collect logs
self.log_buffers.append(log)
# create report if it exists
if report:
try:
f = open("%s_%s_report.rst" %
(self.target, test_name), "w")
except IOError:
print("Report for %s could not be created!" % test_name)
else:
with f:
f.write(report)
# write test result to CSV file
self.csvwriter.writerow([test_name, test_result, result_str])
# this function checks individual test and decides if this test should be in
# the group by comparing it against whitelist/blacklist. it also checks if
# the test is compiled into the binary, and marks it as skipped if necessary
def __filter_test(self, test):
test_cmd = test["Command"]
test_id = test_cmd
# dump tests are specified in full e.g. "Dump_mempool"
if "_autotest" in test_id:
test_id = test_id[:-len("_autotest")]
# filter out blacklisted/whitelisted tests
if self.blacklist and test_id in self.blacklist:
return False
if self.whitelist and test_id not in self.whitelist:
return False
# if test wasn't compiled in, remove it as well
if self.avail_cmds and test_cmd not in self.avail_cmds:
result = 0, "Skipped [Not compiled]", test_id, 0, "", None
self.skipped.append(tuple(result))
return False
return True
def __run_test_group(self, test_group, worker_cmdlines):
group_queue = Queue()
init_result_queue = Queue()
for proc, cmdline in enumerate(worker_cmdlines):
prefix = "test%i" % proc if len(worker_cmdlines) > 1 else ""
group_queue.put(tuple((cmdline, prefix)))
# create a pool of worker threads
# we will initialize child in the initializer, and we don't need to
# close the child because when the pool worker gets destroyed, child
# closes the process
pool = Pool(processes=len(worker_cmdlines),
initializer=pool_init,
initargs=(group_queue, init_result_queue))
results = []
# process all initialization results
for _ in range(len(worker_cmdlines)):
self.__process_result(init_result_queue.get())
# run all tests asynchronously
for test in test_group:
result = pool.apply_async(run_test, (self.target, test))
results.append(result)
# tell the pool to stop all processes once done
pool.close()
# iterate while we have group execution results to get
while len(results) > 0:
# iterate over a copy to be able to safely delete results
# this iterates over a list of group results
for async_result in results[:]:
# if the thread hasn't finished yet, continue
if not async_result.ready():
continue
res = async_result.get()
self.__process_result(res)
# remove result from results list once we're done with it
results.remove(async_result)
# iterate over test groups and run tests associated with them
def run_all_tests(self):
# filter groups
self.parallel_tests = list(
filter(self.__filter_test,
self.parallel_tests)
)
self.non_parallel_tests = list(
filter(self.__filter_test,
self.non_parallel_tests)
)
parallel_cmdlines = []
# FreeBSD doesn't have NUMA support
numa_nodes = get_numa_nodes()
if len(numa_nodes) > 0:
for proc in range(self.n_processes):
# spread cpu affinity between NUMA nodes to have less chance of
# running out of memory while running multiple test apps in
# parallel. to do that, alternate between NUMA nodes in a round
# robin fashion, and pick an arbitrary CPU from that node to
# taskset our execution to
numa_node = numa_nodes[self.active_processes % len(numa_nodes)]
cpu_nr = first_cpu_on_node(numa_node)
parallel_cmdlines += [self.__get_cmdline(cpu_nr)]
# increase number of active processes so that the next cmdline
# gets a different NUMA node
self.active_processes += 1
else:
parallel_cmdlines = [self.cmdline] * self.n_processes
print("Running tests with %d workers" % self.n_processes)
# create table header
print("")
print("Test name".ljust(30) + "Test result".ljust(29) +
"Test".center(9) + "Total".center(9))
print("=" * 80)
if len(self.skipped):
print("Skipped autotests:")
# print out any skipped tests
for result in self.skipped:
# unpack result tuple
test_result, result_str, test_name, _, _, _ = result
self.csvwriter.writerow([test_name, test_result, result_str])
t = ("%s:" % test_name).ljust(30)
t += result_str.ljust(29)
t += "[00m 00s]"
print(t)
# make a note of tests start time
self.start = time.time()
# whatever happens, try to save as much logs as possible
try:
if len(self.parallel_tests) > 0:
print("Parallel autotests:")
self.__run_test_group(self.parallel_tests, parallel_cmdlines)
if len(self.non_parallel_tests) > 0:
print("Non-parallel autotests:")
self.__run_test_group(self.non_parallel_tests, [self.cmdline])
# get total run time
cur_time = time.time()
total_time = int(cur_time - self.start)
# print out summary
print("=" * 80)
print("Total run time: %02dm %02ds" % (total_time / 60,
total_time % 60))
if self.fails != 0:
print("Number of failed tests: %s" % str(self.fails))
# write summary to logfile
self.logfile.write("Summary\n")
self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
self.logfile.write("Failed tests: ".ljust(
15) + "%i\n" % self.fails)
except:
print("Exception occurred")
print(sys.exc_info())
self.fails = 1
# drop logs from all executions to a logfile
for buf in self.log_buffers:
self.logfile.write(buf.replace("\r", ""))
return self.fails
|
gpl-2.0
|
kenju254/yowsup
|
yowsup/layers/protocol_groups/protocolentities/iq_groups_participants_remove.py
|
61
|
1128
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq_groups_participants import ParticipantsGroupsIqProtocolEntity
class RemoveParticipantsIqProtocolEntity(ParticipantsGroupsIqProtocolEntity):
'''
<iq type="set" id="{{id}}" xmlns="w:g2", to="{{group_jid}}">
<remove>
<participant jid="{{jid}}"></participant>
<participant jid="{{jid}}"></participant>
</remove>
</iq>
'''
def __init__(self, group_jid, participantList, _id = None):
super(RemoveParticipantsIqProtocolEntity, self).__init__(group_jid, participantList, "remove", _id = _id)
@staticmethod
def fromProtocolTreeNode(node):
entity = super(RemoveParticipantsIqProtocolEntity, RemoveParticipantsIqProtocolEntity).fromProtocolTreeNode(node)
entity.__class__ = RemoveParticipantsIqProtocolEntity
participantList = []
for participantNode in node.getChild("remove").getAllChildren():
participantList.append(participantNode["jid"])
entity.setProps(node.getAttributeValue("to"), participantList)
return entity
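# Minimal usage sketch (illustrative only; the JIDs are made up and the send
# step assumes the usual yowsup ProtocolEntity/stack interface):
#   entity = RemoveParticipantsIqProtocolEntity(
#       "123456789-1415222332@g.us", ["987654321@s.whatsapp.net"])
#   # entity.toProtocolTreeNode() can then be pushed down the protocol stack.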
|
gpl-3.0
|
Hybrid-Rom/external_skia
|
tools/gen_bench_expectations_from_codereview.py
|
67
|
5806
|
#!/usr/bin/python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate new bench expectations from results of trybots on a code review."""
import collections
import compare_codereview
import os
import re
import shutil
import subprocess
import sys
BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*'
CHECKOUT_PATH = os.path.realpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
TMP_BENCH_DATA_DIR = os.path.join(CHECKOUT_PATH, '.bench_data')
TryBuild = collections.namedtuple(
'TryBuild', ['builder_name', 'build_number', 'is_finished'])
def find_all_builds(codereview_url):
"""Finds and returns information about trybot runs for a code review.
Args:
codereview_url: URL of the codereview in question.
Returns:
List of NamedTuples: (builder_name, build_number, is_finished)
"""
results = compare_codereview.CodeReviewHTMLParser().parse(codereview_url)
try_builds = []
for builder, data in results.iteritems():
if builder.startswith('Perf'):
build_num = data.url.split('/')[-1] if data.url else None
is_finished = (data.status not in ('pending', 'try-pending') and
build_num is not None)
try_builds.append(TryBuild(builder_name=builder,
build_number=build_num,
is_finished=is_finished))
return try_builds
def _all_trybots_finished(try_builds):
"""Return True iff all of the given try jobs have finished.
Args:
try_builds: list of TryBuild instances.
Returns:
True if all of the given try jobs have finished, otherwise False.
"""
for try_build in try_builds:
if not try_build.is_finished:
return False
return True
def all_trybots_finished(codereview_url):
"""Return True iff all of the try jobs on the given codereview have finished.
Args:
codereview_url: string; URL of the codereview.
Returns:
True if all of the try jobs have finished, otherwise False.
"""
return _all_trybots_finished(find_all_builds(codereview_url))
def get_bench_data(builder, build_num, dest_dir):
"""Download the bench data for the given builder at the given build_num.
Args:
builder: string; name of the builder.
build_num: string; build number.
dest_dir: string; destination directory for the bench data.
"""
url = BENCH_DATA_URL % (builder, build_num)
subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def find_revision_from_downloaded_data(dest_dir):
"""Finds the revision at which the downloaded data was generated.
Args:
dest_dir: string; directory holding the downloaded data.
Returns:
The revision (git commit hash) at which the downloaded data was
generated, or None if no revision can be found.
"""
for data_file in os.listdir(dest_dir):
match = re.match('bench_(?P<revision>[0-9a-fA-F]{2,40})_data.*', data_file)
if match:
return match.group('revision')
return None
class TrybotNotFinishedError(Exception):
pass
def gen_bench_expectations_from_codereview(codereview_url,
error_on_unfinished=True):
"""Generate bench expectations from a code review.
Scans the given code review for Perf trybot runs. Downloads the results of
finished trybots and uses them to generate new expectations for their
waterfall counterparts.
Args:
codereview_url: string; URL of the code review.
error_on_unfinished: bool; throw an error if any trybot has not finished.
"""
try_builds = find_all_builds(codereview_url)
# Verify that all trybots have finished running.
if error_on_unfinished and not _all_trybots_finished(try_builds):
raise TrybotNotFinishedError('Not all trybots have finished.')
failed_data_pull = []
failed_gen_expectations = []
if os.path.isdir(TMP_BENCH_DATA_DIR):
shutil.rmtree(TMP_BENCH_DATA_DIR)
for try_build in try_builds:
try_builder = try_build.builder_name
builder = try_builder.replace('-Trybot', '')
# Download the data.
dest_dir = os.path.join(TMP_BENCH_DATA_DIR, builder)
os.makedirs(dest_dir)
try:
get_bench_data(try_builder, try_build.build_number, dest_dir)
except subprocess.CalledProcessError:
failed_data_pull.append(try_builder)
continue
# Find the revision at which the data was generated.
revision = find_revision_from_downloaded_data(dest_dir)
if not revision:
# If we can't find a revision, then something is wrong with the data we
# downloaded. Skip this builder.
failed_data_pull.append(try_builder)
continue
# Generate new expectations.
output_file = os.path.join(CHECKOUT_PATH, 'expectations', 'bench',
'bench_expectations_%s.txt' % builder)
try:
subprocess.check_call(['python',
os.path.join(CHECKOUT_PATH, 'bench',
'gen_bench_expectations.py'),
'-b', builder, '-o', output_file,
'-d', dest_dir, '-r', revision])
except subprocess.CalledProcessError:
failed_gen_expectations.append(builder)
failure = ''
if failed_data_pull:
failure += 'Failed to load data for: %s\n\n' % ','.join(failed_data_pull)
if failed_gen_expectations:
failure += 'Failed to generate expectations for: %s\n\n' % ','.join(
failed_gen_expectations)
if failure:
raise Exception(failure)
if __name__ == '__main__':
gen_bench_expectations_from_codereview(sys.argv[1])
|
bsd-3-clause
|
akashsinghal/Speech-Memorization-App
|
speech/Swift/Speech-gRPC-Streaming/env/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py
|
2931
|
1675
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
|
apache-2.0
|
adblockplus/gyp
|
test/mac/gyptest-strip-default.py
|
232
|
2448
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the default STRIP_STYLEs match between different generators.
"""
import TestGyp
import re
import subprocess
import sys
import time
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR='strip'
test.run_gyp('test-defaults.gyp', chdir=CHDIR)
test.build('test-defaults.gyp', test.ALL, chdir=CHDIR)
# Lightweight check if stripping was done.
def OutPath(s):
return test.built_file_path(s, chdir=CHDIR)
def CheckNsyms(p, o_expected):
proc = subprocess.Popen(['nm', '-aU', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
# Filter out mysterious "00 0000 OPT radr://5614542" symbol which
# is apparently only printed on the bots (older toolchain?).
# Yes, "radr", not "rdar".
o = ''.join(filter(lambda s: 'radr://5614542' not in s, o.splitlines(True)))
o = o.replace('A', 'T')
o = re.sub(r'^[a-fA-F0-9]+', 'XXXXXXXX', o, flags=re.MULTILINE)
assert not proc.returncode
if o != o_expected:
print 'Stripping: Expected symbols """\n%s""", got """\n%s"""' % (
o_expected, o)
test.fail_test()
CheckNsyms(OutPath('libsingle_dylib.dylib'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_so.so'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_exe'),
"""\
XXXXXXXX T __mh_execute_header
""")
CheckNsyms(test.built_file_path(
'bundle_dylib.framework/Versions/A/bundle_dylib', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_so.bundle/Contents/MacOS/bundle_so', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_exe.app/Contents/MacOS/bundle_exe', chdir=CHDIR),
"""\
XXXXXXXX T __mh_execute_header
""")
test.pass_test()
|
bsd-3-clause
|
corruptnova/namebench
|
nb_third_party/dns/rdtypes/IN/A.py
|
248
|
2055
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.ipv4
import dns.rdata
import dns.tokenizer
class A(dns.rdata.Rdata):
"""A record.
@ivar address: an IPv4 address
@type address: string (in the standard "dotted quad" format)"""
__slots__ = ['address']
def __init__(self, rdclass, rdtype, address):
super(A, self).__init__(rdclass, rdtype)
# check that it's OK
junk = dns.ipv4.inet_aton(address)
self.address = address
def to_text(self, origin=None, relativize=True, **kw):
return self.address
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
address = tok.get_identifier()
tok.get_eol()
return cls(rdclass, rdtype, address)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
file.write(dns.ipv4.inet_aton(self.address))
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
address = dns.ipv4.inet_ntoa(wire[current : current + rdlen])
return cls(rdclass, rdtype, address)
from_wire = classmethod(from_wire)
def _cmp(self, other):
sa = dns.ipv4.inet_aton(self.address)
oa = dns.ipv4.inet_aton(other.address)
return cmp(sa, oa)
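# Small usage sketch (illustrative only): in DNS numbering, class IN is 1 and
# type A is 1, so an A rdata can be built and rendered like
#   rr = A(1, 1, '192.0.2.1')
#   rr.to_text()   # -> '192.0.2.1'
#   rr.to_wire(f)  # writes the packed 4-byte address to file object f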
|
apache-2.0
|
ioannistsanaktsidis/invenio
|
modules/websubmit/lib/functions/DEMOVID_Validation.py
|
28
|
4492
|
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSubmit function - Video processing.
"""
from invenio.bibencode_utils import probe
from invenio.websubmit_config import InvenioWebSubmitFunctionStop
import os
__revision__ = "$Id$"
def DEMOVID_Validation(parameters, curdir, form, user_info=None):
"""
"""
messages = []
malformed = False
file_storing_path = os.path.join(curdir, "files", str(user_info['uid']), "NewFile", 'filepath')
file_storing_name = os.path.join(curdir, "files", str(user_info['uid']), "NewFile", 'filename')
file_storing_aspect = os.path.join(curdir, "DEMOVID_ASPECT")
file_storing_title = os.path.join(curdir, "DEMOVID_TITLE")
file_storing_description = os.path.join(curdir, "DEMOVID_DESCR")
file_storing_author = os.path.join(curdir, "DEMOVID_AU")
file_storing_year = os.path.join(curdir, "DEMOVID_YEAR")
## Validate the uploaded video
try:
fp = open(file_storing_path)
fullpath = fp.read()
fp.close()
fp = open(file_storing_name)
filename = fp.read()
fp.close()
if not probe(fullpath) or os.path.splitext(filename)[1].lower().lstrip('.') in ['jpg', 'jpeg', 'gif', 'tiff', 'bmp', 'png', 'tga']:
malformed = True
messages.append("The uploaded file is not a supported video format.")
except:
malformed = True
messages.append("Please upload a video.")
## Validate the title
try:
fp = open(file_storing_title)
title = fp.read()
fp.close()
if len(title) < 2 or len(title) > 50:
malformed = True
messages.append("The title is too short, please enter at least 2 characters.")
except:
malformed = True
messages.append("Please enter a title.")
## Validate the description
try:
fp = open(file_storing_description)
description = fp.read()
fp.close()
if len(description) < 10:
malformed = True
messages.append("The description must be at least 10 characters long.")
except:
malformed = True
messages.append("Please enter a description.")
## Validate author
try:
fp = open(file_storing_author)
author = fp.read()
fp.close()
except:
malformed = True
messages.append("Please enter at least one author.")
## Validate year
try:
fp = open(file_storing_year)
year = fp.read()
fp.close()
except:
malformed = True
messages.append("Please enter a year.")
try:
if int(year) < 1000 or int(year) > 9999:
malformed = True
messages.append("Please enter a valid year. It must consist of 4 digits.")
except:
malformed = True
messages.append("Please enter a valid year. It must consist of 4 digits.")
## Validate the aspect ratio
try:
fp = open(file_storing_aspect)
aspect = fp.read()
fp.close()
try:
aspectx, aspecty = aspect.split(':')
aspectx = int(aspectx)
aspecty = int(aspecty)
except:
malformed = True
messages.append("Aspect ratio was not provided as 'Number:Number' format")
except:
malformed = True
messages.append("Please enter an aspect ratio.")
if malformed:
raise InvenioWebSubmitFunctionStop("""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
alert('%s');
document.forms[0].submit();
</SCRIPT>""" % "\\n".join(messages)
)
else:
return
|
gpl-2.0
|
piensa/geonode
|
geonode/groups/translation.py
|
18
|
1291
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from modeltranslation.translator import translator, TranslationOptions
from geonode.groups.models import GroupCategory, GroupProfile
class GroupCategoryTranslationOptions(TranslationOptions):
fields = ('name',)
class GroupProfileTranslationOptions(TranslationOptions):
fields = ('title', 'description',)
translator.register(GroupCategory, GroupCategoryTranslationOptions)
translator.register(GroupProfile, GroupProfileTranslationOptions)
|
gpl-3.0
|
edxnercel/edx-platform
|
.pycharm_helpers/pydev/pydev_ipython/inputhook.py
|
52
|
18411
|
# coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
# We don't (in the context of PyDev console) actually set PyOS_InputHook, but rather
# while waiting for input on xmlrpc we run this code
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
If None, clear all app references. If 'wx' or 'qt4', clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the ``PyOS_InputHook`` for wxPython, which allows
wxPython to integrate with terminal-based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the PyOS_InputHook for PyQt4, which allows
PyQt4 to integrate with terminal-based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for PyGTK, which allows
PyGTK to integrate with terminal-based applications like
IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for GLUT, which allows GLUT to
integrate with terminal-based applications like IPython. Due to GLUT
limitations, it is currently not possible to start the event loop
without first creating a window. You should thus not create another
window but instead use the one created here. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
This sets PyOS_InputHook to NULL, sets the display function to a
dummy one, and sets the timer to a dummy timer that will be triggered
very far in the future.
"""
import OpenGL.GLUT as glut
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the ``PyOS_InputHook`` for pyglet, which allows
pyglet to integrate with terminal-based applications like
IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for Gtk3, which allows
Gtk3 to integrate with terminal-based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
We call the function pyplot.pause, which updates and displays the
active figure during the pause. It's not MacOSX-specific, but it lets
us avoid input hooks in the native MacOSX backend.
Also, we shouldn't import pyplot until the user does, because the
backend can only be chosen before pyplot is imported for the first
time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
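# Illustrative usage sketch (not part of the original module; only the
# module-level helpers defined above are referenced): a return-control
# callback has to be registered before any GUI hook can be enabled, e.g.
#
#     set_return_control_callback(lambda: True)   # hand control back immediately
#     enable_gui(GUI_QT4)                          # install the Qt4 input hook
#     ...
#     enable_gui(None)                             # clear the input hook again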
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt4",
"disable_qt4",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
|
agpl-3.0
|
hickford/youtube-dl
|
youtube_dl/__init__.py
|
32
|
16801
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__license__ = 'Public Domain'
import codecs
import io
import os
import random
import sys
from .options import (
parseOpts,
)
from .compat import (
compat_expanduser,
compat_getpass,
compat_print,
compat_shlex_split,
workaround_optparse_bug9161,
)
from .utils import (
DateRange,
decodeOption,
DEFAULT_OUTTMPL,
DownloadError,
match_filter_func,
MaxDownloadsReached,
preferredencoding,
read_batch_urls,
SameFileError,
setproctitle,
std_headers,
write_string,
)
from .update import update_self
from .downloader import (
FileDownloader,
)
from .extractor import gen_extractors, list_extractors
from .YoutubeDL import YoutubeDL
def _real_main(argv=None):
# Compatibility fixes for Windows
if sys.platform == 'win32':
# https://github.com/rg3/youtube-dl/issues/820
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
workaround_optparse_bug9161()
setproctitle('youtube-dl')
parser, opts, args = parseOpts(argv)
# Set user agent
if opts.user_agent is not None:
std_headers['User-Agent'] = opts.user_agent
# Set referer
if opts.referer is not None:
std_headers['Referer'] = opts.referer
# Custom HTTP headers
if opts.headers is not None:
for h in opts.headers:
if h.find(':', 1) < 0:
parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
            key, value = h.split(':', 1)  # split on the first colon only, so header values may contain ':'
if opts.verbose:
write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
std_headers[key] = value
# Dump user agent
if opts.dump_user_agent:
compat_print(std_headers['User-Agent'])
sys.exit(0)
# Batch file verification
batch_urls = []
if opts.batchfile is not None:
try:
if opts.batchfile == '-':
batchfd = sys.stdin
else:
batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
batch_urls = read_batch_urls(batchfd)
if opts.verbose:
write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
except IOError:
sys.exit('ERROR: batch file could not be read')
all_urls = batch_urls + args
all_urls = [url.strip() for url in all_urls]
_enc = preferredencoding()
all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]
if opts.list_extractors:
for ie in list_extractors(opts.age_limit):
compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
matchedUrls = [url for url in all_urls if ie.suitable(url)]
for mu in matchedUrls:
compat_print(' ' + mu)
sys.exit(0)
if opts.list_extractor_descriptions:
for ie in list_extractors(opts.age_limit):
if not ie._WORKING:
continue
desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
if desc is False:
continue
if hasattr(ie, 'SEARCH_KEY'):
_SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
_COUNTS = ('', '5', '10', 'all')
desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
compat_print(desc)
sys.exit(0)
# Conflicting, missing and erroneous options
if opts.usenetrc and (opts.username is not None or opts.password is not None):
parser.error('using .netrc conflicts with giving username/password')
if opts.password is not None and opts.username is None:
parser.error('account username missing\n')
if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
parser.error('using output template conflicts with using title, video ID or auto number')
if opts.usetitle and opts.useid:
parser.error('using title conflicts with using video ID')
if opts.username is not None and opts.password is None:
opts.password = compat_getpass('Type account password and press [Return]: ')
if opts.ratelimit is not None:
numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
if numeric_limit is None:
parser.error('invalid rate limit specified')
opts.ratelimit = numeric_limit
if opts.min_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
if numeric_limit is None:
parser.error('invalid min_filesize specified')
opts.min_filesize = numeric_limit
if opts.max_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
if numeric_limit is None:
parser.error('invalid max_filesize specified')
opts.max_filesize = numeric_limit
if opts.retries is not None:
if opts.retries in ('inf', 'infinite'):
opts_retries = float('inf')
else:
try:
opts_retries = int(opts.retries)
except (TypeError, ValueError):
parser.error('invalid retry count specified')
if opts.buffersize is not None:
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
if numeric_buffersize is None:
parser.error('invalid buffer size specified')
opts.buffersize = numeric_buffersize
if opts.playliststart <= 0:
raise ValueError('Playlist start must be positive')
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
raise ValueError('Playlist end must be greater than playlist start')
if opts.extractaudio:
if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
parser.error('invalid audio format specified')
if opts.audioquality:
opts.audioquality = opts.audioquality.strip('k').strip('K')
if not opts.audioquality.isdigit():
parser.error('invalid audio quality specified')
if opts.recodevideo is not None:
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
parser.error('invalid video recode format specified')
if opts.convertsubtitles is not None:
if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
parser.error('invalid subtitle format specified')
if opts.date is not None:
date = DateRange.day(opts.date)
else:
date = DateRange(opts.dateafter, opts.datebefore)
# Do not download videos when there are audio-only formats
if opts.extractaudio and not opts.keepvideo and opts.format is None:
opts.format = 'bestaudio/best'
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
# this was the old behaviour if only --all-sub was given.
if opts.allsubtitles and not opts.writeautomaticsub:
opts.writesubtitles = True
outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
(opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
(opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
(opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
(opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
(opts.useid and '%(id)s.%(ext)s') or
(opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
DEFAULT_OUTTMPL)
if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
parser.error('Cannot download a video and extract audio into the same'
' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
' template'.format(outtmpl))
any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
any_printing = opts.print_json
download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
# PostProcessors
postprocessors = []
# Add the metadata pp first, the other pps will copy it
if opts.metafromtitle:
postprocessors.append({
'key': 'MetadataFromTitle',
'titleformat': opts.metafromtitle
})
if opts.addmetadata:
postprocessors.append({'key': 'FFmpegMetadata'})
if opts.extractaudio:
postprocessors.append({
'key': 'FFmpegExtractAudio',
'preferredcodec': opts.audioformat,
'preferredquality': opts.audioquality,
'nopostoverwrites': opts.nopostoverwrites,
})
if opts.recodevideo:
postprocessors.append({
'key': 'FFmpegVideoConvertor',
'preferedformat': opts.recodevideo,
})
if opts.convertsubtitles:
postprocessors.append({
'key': 'FFmpegSubtitlesConvertor',
'format': opts.convertsubtitles,
})
if opts.embedsubtitles:
postprocessors.append({
'key': 'FFmpegEmbedSubtitle',
})
if opts.xattrs:
postprocessors.append({'key': 'XAttrMetadata'})
if opts.embedthumbnail:
already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
postprocessors.append({
'key': 'EmbedThumbnail',
'already_have_thumbnail': already_have_thumbnail
})
if not already_have_thumbnail:
opts.writethumbnail = True
    # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
    # Running it earlier would let the user's command remove the file before other postprocessors run, which could cause problems.
if opts.exec_cmd:
postprocessors.append({
'key': 'ExecAfterDownload',
'exec_cmd': opts.exec_cmd,
})
if opts.xattr_set_filesize:
try:
import xattr
xattr # Confuse flake8
except ImportError:
parser.error('setting filesize xattr requested but python-xattr is not available')
external_downloader_args = None
if opts.external_downloader_args:
external_downloader_args = compat_shlex_split(opts.external_downloader_args)
postprocessor_args = None
if opts.postprocessor_args:
postprocessor_args = compat_shlex_split(opts.postprocessor_args)
match_filter = (
None if opts.match_filter is None
else match_filter_func(opts.match_filter))
ydl_opts = {
'usenetrc': opts.usenetrc,
'username': opts.username,
'password': opts.password,
'twofactor': opts.twofactor,
'videopassword': opts.videopassword,
'quiet': (opts.quiet or any_getting or any_printing),
'no_warnings': opts.no_warnings,
'forceurl': opts.geturl,
'forcetitle': opts.gettitle,
'forceid': opts.getid,
'forcethumbnail': opts.getthumbnail,
'forcedescription': opts.getdescription,
'forceduration': opts.getduration,
'forcefilename': opts.getfilename,
'forceformat': opts.getformat,
'forcejson': opts.dumpjson or opts.print_json,
'dump_single_json': opts.dump_single_json,
'simulate': opts.simulate or any_getting,
'skip_download': opts.skip_download,
'format': opts.format,
'listformats': opts.listformats,
'outtmpl': outtmpl,
'autonumber_size': opts.autonumber_size,
'restrictfilenames': opts.restrictfilenames,
'ignoreerrors': opts.ignoreerrors,
'force_generic_extractor': opts.force_generic_extractor,
'ratelimit': opts.ratelimit,
'nooverwrites': opts.nooverwrites,
'retries': opts_retries,
'buffersize': opts.buffersize,
'noresizebuffer': opts.noresizebuffer,
'continuedl': opts.continue_dl,
'noprogress': opts.noprogress,
'progress_with_newline': opts.progress_with_newline,
'playliststart': opts.playliststart,
'playlistend': opts.playlistend,
'playlistreverse': opts.playlist_reverse,
'noplaylist': opts.noplaylist,
'logtostderr': opts.outtmpl == '-',
'consoletitle': opts.consoletitle,
'nopart': opts.nopart,
'updatetime': opts.updatetime,
'writedescription': opts.writedescription,
'writeannotations': opts.writeannotations,
'writeinfojson': opts.writeinfojson,
'writethumbnail': opts.writethumbnail,
'write_all_thumbnails': opts.write_all_thumbnails,
'writesubtitles': opts.writesubtitles,
'writeautomaticsub': opts.writeautomaticsub,
'allsubtitles': opts.allsubtitles,
'listsubtitles': opts.listsubtitles,
'subtitlesformat': opts.subtitlesformat,
'subtitleslangs': opts.subtitleslangs,
'matchtitle': decodeOption(opts.matchtitle),
'rejecttitle': decodeOption(opts.rejecttitle),
'max_downloads': opts.max_downloads,
'prefer_free_formats': opts.prefer_free_formats,
'verbose': opts.verbose,
'dump_intermediate_pages': opts.dump_intermediate_pages,
'write_pages': opts.write_pages,
'test': opts.test,
'keepvideo': opts.keepvideo,
'min_filesize': opts.min_filesize,
'max_filesize': opts.max_filesize,
'min_views': opts.min_views,
'max_views': opts.max_views,
'daterange': date,
'cachedir': opts.cachedir,
'youtube_print_sig_code': opts.youtube_print_sig_code,
'age_limit': opts.age_limit,
'download_archive': download_archive_fn,
'cookiefile': opts.cookiefile,
'nocheckcertificate': opts.no_check_certificate,
'prefer_insecure': opts.prefer_insecure,
'proxy': opts.proxy,
'socket_timeout': opts.socket_timeout,
'bidi_workaround': opts.bidi_workaround,
'debug_printtraffic': opts.debug_printtraffic,
'prefer_ffmpeg': opts.prefer_ffmpeg,
'include_ads': opts.include_ads,
'default_search': opts.default_search,
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
'encoding': opts.encoding,
'extract_flat': opts.extract_flat,
'merge_output_format': opts.merge_output_format,
'postprocessors': postprocessors,
'fixup': opts.fixup,
'source_address': opts.source_address,
'call_home': opts.call_home,
'sleep_interval': opts.sleep_interval,
'external_downloader': opts.external_downloader,
'list_thumbnails': opts.list_thumbnails,
'playlist_items': opts.playlist_items,
'xattr_set_filesize': opts.xattr_set_filesize,
'match_filter': match_filter,
'no_color': opts.no_color,
'ffmpeg_location': opts.ffmpeg_location,
'hls_prefer_native': opts.hls_prefer_native,
'external_downloader_args': external_downloader_args,
'postprocessor_args': postprocessor_args,
'cn_verification_proxy': opts.cn_verification_proxy,
}
with YoutubeDL(ydl_opts) as ydl:
# Update version
if opts.update_self:
update_self(ydl.to_screen, opts.verbose)
# Remove cache dir
if opts.rm_cachedir:
ydl.cache.remove()
# Maybe do nothing
if (len(all_urls) < 1) and (opts.load_info_filename is None):
if opts.update_self or opts.rm_cachedir:
sys.exit()
ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
parser.error(
'You must provide at least one URL.\n'
'Type youtube-dl --help to see a list of all options.')
try:
if opts.load_info_filename is not None:
retcode = ydl.download_with_info_file(opts.load_info_filename)
else:
retcode = ydl.download(all_urls)
except MaxDownloadsReached:
ydl.to_screen('--max-download limit reached, aborting.')
retcode = 101
sys.exit(retcode)
def main(argv=None):
try:
_real_main(argv)
except DownloadError:
sys.exit(1)
except SameFileError:
sys.exit('ERROR: fixed output name but more than one file to download')
except KeyboardInterrupt:
sys.exit('\nERROR: Interrupted by user')
__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
|
unlicense
|
AlanZatarain/cortex-vfx
|
test/IECore/TypeIdTest.py
|
12
|
2531
|
##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class TypeIdTest( unittest.TestCase ) :
def testNoDuplicates( self ) :
#boost.python.enum doesn't allow us to ensure that no duplicate values are in it. Here we do a simple verification to ensure
#that the value of each attribute IECore.TypeId.XXXXX is present in the "values" array. If it's not, the likely cause
#is a duplicate.
ids = {}
num = 0
for i in dir( IECore.TypeId ) :
v = getattr( IECore.TypeId, i )
if type( v ) is IECore.TypeId :
if v in ids :
raise RuntimeError ("TypeId for %s is a duplicate of %s" % ( i, ids[v] ) )
ids[v] = i
num = num + 1
self.assertEqual( num, len( IECore.TypeId.values ) )
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
benoitsteiner/tensorflow-xsmm
|
tensorflow/python/saved_model/utils_test.py
|
62
|
5133
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import utils
class UtilsTest(test.TestCase):
def testBuildTensorInfoDense(self):
x = array_ops.placeholder(dtypes.float32, 1, name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual("x:0", x_tensor_info.name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(1, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(1, x_tensor_info.tensor_shape.dim[0].size)
def testBuildTensorInfoSparse(self):
x = array_ops.sparse_placeholder(dtypes.float32, [42, 69], name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual(x.values.name,
x_tensor_info.coo_sparse.values_tensor_name)
self.assertEqual(x.indices.name,
x_tensor_info.coo_sparse.indices_tensor_name)
self.assertEqual(x.dense_shape.name,
x_tensor_info.coo_sparse.dense_shape_tensor_name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(2, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(42, x_tensor_info.tensor_shape.dim[0].size)
self.assertEqual(69, x_tensor_info.tensor_shape.dim[1].size)
def testGetTensorFromInfoDense(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, ops.Tensor)
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoSparse(self):
expected = array_ops.sparse_placeholder(dtypes.float32, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, sparse_tensor.SparseTensor)
self.assertEqual(expected.values.name, actual.values.name)
self.assertEqual(expected.indices.name, actual.indices.name)
self.assertEqual(expected.dense_shape.name, actual.dense_shape.name)
def testGetTensorFromInfoInOtherGraph(self):
with ops.Graph().as_default() as expected_graph:
expected = array_ops.placeholder(dtypes.float32, 1, name="right")
tensor_info = utils.build_tensor_info(expected)
with ops.Graph().as_default(): # Some other graph.
array_ops.placeholder(dtypes.float32, 1, name="other")
actual = utils.get_tensor_from_tensor_info(tensor_info,
graph=expected_graph)
self.assertIsInstance(actual, ops.Tensor)
self.assertIs(actual.graph, expected_graph)
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoInScope(self):
# Build a TensorInfo with name "bar/x:0".
with ops.Graph().as_default():
with ops.name_scope("bar"):
unscoped = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(unscoped)
self.assertEqual("bar/x:0", tensor_info.name)
# Build a graph with node "foo/bar/x:0", akin to importing into scope foo.
with ops.Graph().as_default():
with ops.name_scope("foo"):
with ops.name_scope("bar"):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
self.assertEqual("foo/bar/x:0", expected.name)
# Test that tensor is found by prepending the import scope.
actual = utils.get_tensor_from_tensor_info(tensor_info,
import_scope="foo")
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoRaisesErrors(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
tensor_info.name = "blah:0" # Nonexistant name.
with self.assertRaises(KeyError):
utils.get_tensor_from_tensor_info(tensor_info)
tensor_info.ClearField("name") # Malformed (missing encoding).
with self.assertRaises(ValueError):
utils.get_tensor_from_tensor_info(tensor_info)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
geerlingguy/ansible-modules-extras
|
cloud/amazon/dynamodb_table.py
|
35
|
8108
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: dynamodb_table
short_description: Create, update or delete AWS Dynamo DB tables.
description:
- Create or delete AWS Dynamo DB tables.
- Can update the provisioned throughput on existing tables.
- Returns the status of the specified table.
version_added: "2.0"
author: Alan Loi (@loia)
version_added: "2.0"
requirements:
- "boto >= 2.13.2"
options:
state:
description:
- Create or delete the table
required: false
choices: ['present', 'absent']
default: 'present'
name:
description:
- Name of the table.
required: true
hash_key_name:
description:
- Name of the hash key.
- Required when C(state=present).
required: false
default: null
hash_key_type:
description:
- Type of the hash key.
required: false
choices: ['STRING', 'NUMBER', 'BINARY']
default: 'STRING'
range_key_name:
description:
- Name of the range key.
required: false
default: null
range_key_type:
description:
- Type of the range key.
required: false
choices: ['STRING', 'NUMBER', 'BINARY']
default: 'STRING'
read_capacity:
description:
- Read throughput capacity (units) to provision.
required: false
default: 1
write_capacity:
description:
- Write throughput capacity (units) to provision.
required: false
default: 1
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
extends_documentation_fragment: aws
"""
EXAMPLES = '''
# Create dynamo table with hash and range primary key
- dynamodb_table:
name: my-table
region: us-east-1
hash_key_name: id
hash_key_type: STRING
range_key_name: create_time
range_key_type: NUMBER
read_capacity: 2
write_capacity: 2
# Update capacity on existing dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
read_capacity: 10
write_capacity: 10
# Delete dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
state: absent
'''
RETURN = '''
table_status:
description: The current status of the table.
returned: success
type: string
sample: ACTIVE
'''
try:
import boto
import boto.dynamodb2
from boto.dynamodb2.table import Table
from boto.dynamodb2.fields import HashKey, RangeKey
from boto.dynamodb2.types import STRING, NUMBER, BINARY
from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
DYNAMO_TYPE_MAP = {
'STRING': STRING,
'NUMBER': NUMBER,
'BINARY': BINARY
}
def create_or_update_dynamo_table(connection, module):
table_name = module.params.get('name')
hash_key_name = module.params.get('hash_key_name')
hash_key_type = module.params.get('hash_key_type')
range_key_name = module.params.get('range_key_name')
range_key_type = module.params.get('range_key_type')
read_capacity = module.params.get('read_capacity')
write_capacity = module.params.get('write_capacity')
schema = [
HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)),
RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type))
]
throughput = {
'read': read_capacity,
'write': write_capacity
}
result = dict(
region=module.params.get('region'),
table_name=table_name,
hash_key_name=hash_key_name,
hash_key_type=hash_key_type,
range_key_name=range_key_name,
range_key_type=range_key_type,
read_capacity=read_capacity,
write_capacity=write_capacity,
)
try:
table = Table(table_name, connection=connection)
if dynamo_table_exists(table):
result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode)
else:
if not module.check_mode:
Table.create(table_name, connection=connection, schema=schema, throughput=throughput)
result['changed'] = True
if not module.check_mode:
result['table_status'] = table.describe()['Table']['TableStatus']
except BotoServerError:
result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
module.fail_json(**result)
else:
module.exit_json(**result)
def delete_dynamo_table(connection, module):
table_name = module.params.get('name')
result = dict(
region=module.params.get('region'),
table_name=table_name,
)
try:
table = Table(table_name, connection=connection)
if dynamo_table_exists(table):
if not module.check_mode:
table.delete()
result['changed'] = True
else:
result['changed'] = False
except BotoServerError:
result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
module.fail_json(**result)
else:
module.exit_json(**result)
def dynamo_table_exists(table):
try:
table.describe()
return True
except JSONResponseError, e:
if e.message and e.message.startswith('Requested resource not found'):
return False
else:
raise e
def update_dynamo_table(table, throughput=None, check_mode=False):
table.describe() # populate table details
if has_throughput_changed(table, throughput):
if not check_mode:
return table.update(throughput=throughput)
else:
return True
return False
def has_throughput_changed(table, new_throughput):
if not new_throughput:
return False
return new_throughput['read'] != table.throughput['read'] or \
new_throughput['write'] != table.throughput['write']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent']),
name=dict(required=True, type='str'),
hash_key_name=dict(required=True, type='str'),
hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
range_key_name=dict(type='str'),
range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
read_capacity=dict(default=1, type='int'),
write_capacity=dict(default=1, type='int'),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg='region must be specified')
try:
connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
except (NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
state = module.params.get('state')
if state == 'present':
create_or_update_dynamo_table(connection, module)
elif state == 'absent':
delete_dynamo_table(connection, module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
unkyulee/elastic-cms
|
src/web/modules/post/services/config.py
|
1
|
2826
|
import web.util.tools as tools
import os
from web import app
import lib.es as es
def get(p):
# get host and index from the global config
h = tools.get_conf(p['host'], p['navigation']['id'], 'host', 'http://localhost:9200')
n = tools.get_conf(p['host'], p['navigation']['id'], 'index', '')
return {
'name': get_conf(h, n, 'name', ''),
'description': get_conf(h, n, 'description', ''),
'host': h,
'index': n,
'upload_dir':
get_conf(h, n, 'upload_dir',
os.path.join( app.config.get('BASE_DIR'), 'uploads' )
),
'allowed_exts': get_conf(h, n, 'allowed_exts',''),
'page_size': get_conf(h, n, 'page_size', '10'),
'query': get_conf(h, n, 'query', '*'),
'sort_field': get_conf(h, n, 'sort_field', '_score'),
'sort_dir': get_conf(h, n, 'sort_dir', 'desc'),
'top': get_conf(h, n, 'top', ''),
'footer': get_conf(h, n, 'footer', ''),
'side': get_conf(h, n, 'side', ''),
'content_header': get_conf(h, n, 'content_header', ''),
'content_footer': get_conf(h, n, 'content_footer', ''),
'intro': get_conf(h, n, 'intro', ''),
'search_query': get_conf(h, n, 'search_query', ''),
'search_item_template': get_conf(h, n, 'search_item_template', ''),
'keep_history': get_conf(h, n, 'keep_history', 'Yes'),
}
def set(p):
# get host, index
host = p['c']['host']
if not host:
host = tools.get('host')
index = p['c']['index']
if not index:
index = tools.get('index')
# get host and index from the global config
tools.set_conf(p['host'], p['navigation']['id'], 'host', host)
tools.set_conf(p['host'], p['navigation']['id'], 'index', index)
# set local config
if p['c']['index']: # save local config only when index is already created
set_conf(host, index, 'name', tools.get('name'))
set_conf(host, index, 'description', tools.get('description'))
set_conf(host, index, 'upload_dir', tools.get('upload_dir'))
set_conf(host, index, 'allowed_exts', tools.get('allowed_exts'))
set_conf(host, index, 'page_size', tools.get('page_size'))
set_conf(host, index, 'query', tools.get('query'))
set_conf(host, index, 'sort_field', tools.get('sort_field'))
set_conf(host, index, 'sort_dir', tools.get('sort_dir'))
set_conf(host, index, 'keep_history', tools.get('keep_history'))
def get_conf(host, index, name, default):
ret = es.get(host, index, "config", name)
return ret.get('value') if ret and ret.get('value') else default
def set_conf(host, index, name, value):
config = {
'name': name,
'value': value
}
es.update(host, index, "config", name, config)
es.flush(host, index)
|
mit
|
GutenkunstLab/SloppyCell
|
test/test_FixedPoints.py
|
1
|
3610
|
import unittest
import scipy
from SloppyCell.ReactionNetworks import *
lorenz = Network('lorenz')
lorenz.add_compartment('basic')
lorenz.add_species('x', 'basic', 0.5)
lorenz.add_species('y', 'basic', 0.5)
lorenz.add_species('z', 'basic', 0.5)
lorenz.add_parameter('sigma', 1.0)
lorenz.add_parameter('r', 2.0)
lorenz.add_parameter('b', 2.0)
lorenz.add_rate_rule('x', 'sigma*(y-x)')
lorenz.add_rate_rule('y', 'r*x - y - x*z')
lorenz.add_rate_rule('z', 'x*y - b*z')
class test_fixedpoints(unittest.TestCase):
def test_basic(self):
""" Test basic fixed-point finding """
net = lorenz.copy('test')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=False)
# This should find the fixed-point [sqrt(2), sqrt(2), 1]
self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on basic 1,0.')
self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on basic 1,1.')
self.assertAlmostEqual(fp[2], 1, 6, 'Failed on basic 1,2.')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[-0.1,-0.1,-0.1],
with_logs=False)
# This should find the fixed-point [0, 0, 0]
self.assertAlmostEqual(fp[0], 0, 6, 'Failed on basic 2,0.')
self.assertAlmostEqual(fp[1], 0, 6, 'Failed on basic 2,1.')
self.assertAlmostEqual(fp[2], 0, 6, 'Failed on basic 2,2.')
def test_withlogs(self):
""" Test fixed-point finding with logs """
net = lorenz.copy('test')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=True)
# This should find the fixed-point [sqrt(2), sqrt(2), 1]
self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on logs 1,0.')
self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on logs 1,1.')
self.assertAlmostEqual(fp[2], 1, 6, 'Failed on logs 1,2.')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1],
with_logs=True)
# This should find the fixed-point [0, 0, 0]
self.assertAlmostEqual(fp[0], 0, 6, 'Failed on logs 2,0.')
self.assertAlmostEqual(fp[1], 0, 6, 'Failed on logs 2,1.')
self.assertAlmostEqual(fp[2], 0, 6, 'Failed on logs 2,2.')
def test_stability(self):
net = lorenz.copy('test')
# The sqrt(b*(r-1)), sqrt(b*(r-1)), r-1 fixed point is stable for r < rH
# Strogatz, Nonlinear Dynamics and Chaos (p. 316)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1],
stability=True)
self.assertEqual(stable, -1, 'Failed to classify stable fixed point')
# (0,0,0) is a saddle here
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.01,0.01,0.01],
stability=True)
self.assertEqual(stable, 0, 'Failed to classify saddle')
# (0,0,0) is a stable node here
net.set_var_ic('r', 0.5)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1],
stability=True)
self.assertEqual(stable, -1, 'Failed to classify stable fixed point')
# Now make the far fixed point a saddle...
net.set_var_ic('sigma', 6.0)
net.set_var_ic('r', 25)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[10,10,10],
stability=True)
self.assertEqual(stable, 0, 'Failed to classify saddle')
suite = unittest.makeSuite(test_fixedpoints)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
bitemyapp/Suns
|
src/pika/exceptions.py
|
6
|
4260
|
"""Pika specific exceptions"""
class AMQPError(Exception):
def __repr__(self):
return 'An unspecified AMQP error has occurred'
class AMQPConnectionError(AMQPError):
def __repr__(self):
if len(self.args) == 1:
if (self.args[0] == 1):
return ('No connection could be opened after 1 '
'connection attempt')
else:
return ('No connection could be opened after %s '
'connection attempts' %
self.args[0])
elif len(self.args) == 2:
return '%s: %s' % (self.args[0], self.args[1])
class IncompatibleProtocolError(AMQPConnectionError):
def __repr__(self):
return 'The protocol returned by the server is not supported'
class AuthenticationError(AMQPConnectionError):
def __repr__(self):
return ('Server and client could not negotiate use of the %s '
'authentication mechanism' % self.args[0])
class ProbableAuthenticationError(AMQPConnectionError):
def __repr__(self):
return ('Client was disconnected at a connection stage indicating a '
'probable authentication error')
class ProbableAccessDeniedError(AMQPConnectionError):
def __repr__(self):
return ('Client was disconnected at a connection stage indicating a '
'probable denial of access to the specified virtual host')
class NoFreeChannels(AMQPConnectionError):
def __repr__(self):
return 'The connection has run out of free channels'
class ConnectionClosed(AMQPConnectionError):
def __repr__(self):
return 'The AMQP connection was closed (%s) %s' % (self.args[0],
self.args[1])
class AMQPChannelError(AMQPError):
def __repr__(self):
return 'An unspecified AMQP channel error has occurred'
class ChannelClosed(AMQPChannelError):
def __repr__(self):
return 'The channel was remotely closed (%s) %s' % (self.args[0],
self.args[1])
class DuplicateConsumerTag(AMQPChannelError):
def __repr__(self):
return ('The consumer tag specified already exists for this '
'channel: %s' % self.args[0])
class ConsumerCancelled(AMQPChannelError):
def __repr__(self):
return 'Server cancelled consumer (%s): %s' % (self.args[0].reply_code,
self.args[0].reply_text)
class InvalidChannelNumber(AMQPError):
def __repr__(self):
return 'An invalid channel number has been specified: %s' % self.args[0]
class ProtocolSyntaxError(AMQPError):
def __repr__(self):
return 'An unspecified protocol syntax error occurred'
class UnexpectedFrameError(ProtocolSyntaxError):
def __repr__(self):
return 'Received a frame out of sequence: %r' % self.args[0]
class ProtocolVersionMismatch(ProtocolSyntaxError):
def __repr__(self):
return 'Protocol versions did not match: %r vs %r' % (self.args[0],
self.args[1])
class BodyTooLongError(ProtocolSyntaxError):
def __repr__(self):
return ('Received too many bytes for a message delivery: '
'Received %i, expected %i' % (self.args[0], self.args[1]))
class InvalidFrameError(ProtocolSyntaxError):
def __repr__(self):
return 'Invalid frame received: %r' % self.args[0]
class InvalidFieldTypeException(ProtocolSyntaxError):
def __repr__(self):
return 'Unsupported field kind %s' % self.args[0]
class UnspportedAMQPFieldException(ProtocolSyntaxError):
def __repr__(self):
return 'Unsupported field kind %s' % type(self.args[1])
class MethodNotImplemented(AMQPError):
pass
class ChannelError(Exception):
def __repr__(self):
return 'An unspecified error occurred with the Channel'
class InvalidMinimumFrameSize(ProtocolSyntaxError):
def __repr__(self):
return 'AMQP Minimum Frame Size is 4096 Bytes'
class InvalidMaximumFrameSize(ProtocolSyntaxError):
def __repr__(self):
return 'AMQP Maximum Frame Size is 131072 Bytes'
|
bsd-3-clause
|
diagramsoftware/odoo
|
addons/sale/res_partner.py
|
236
|
1722
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
_inherit = 'res.partner'
def _sale_order_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict(map(lambda x: (x,0), ids))
# The current user may not have access rights for sale orders
try:
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = len(partner.sale_order_ids) + len(partner.mapped('child_ids.sale_order_ids'))
except:
pass
return res
_columns = {
'sale_order_count': fields.function(_sale_order_count, string='# of Sales Order', type='integer'),
'sale_order_ids': fields.one2many('sale.order','partner_id','Sales Order')
}
|
agpl-3.0
|
sijis/err
|
tests/backend_tests/text_test.py
|
3
|
1047
|
import sys
from tempfile import mkdtemp
import pytest
import logging
import os
from errbot.backends.text import TextBackend
from errbot.bootstrap import bot_config_defaults
@pytest.fixture
def text_backend():
tempdir = mkdtemp()
# reset the config every time
sys.modules.pop('errbot.config-template', None)
__import__('errbot.config-template')
config = sys.modules['errbot.config-template']
bot_config_defaults(config)
config.BOT_DATA_DIR = tempdir
config.BOT_LOG_FILE = os.path.join(tempdir, 'log.txt')
config.BOT_EXTRA_PLUGIN_DIR = []
config.BOT_LOG_LEVEL = logging.DEBUG
config.BOT_PREFIX = '!'
config.BOT_IDENTITY['username'] = '@testme'
config.BOT_ADMINS = ['@test_admin']
return TextBackend(config)
def test_change_presence(text_backend, caplog):
with caplog.at_level(logging.DEBUG):
text_backend.change_presence('online', "I'm online")
assert len(caplog.messages) == 1
a_message = caplog.messages[0]
assert a_message.startswith('*** Changed presence')
|
gpl-3.0
|
lijoantony/django-oscar
|
src/oscar/apps/dashboard/catalogue/tables.py
|
27
|
2631
|
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from django_tables2 import Column, LinkColumn, TemplateColumn, A
from oscar.core.loading import get_class, get_model
DashboardTable = get_class('dashboard.tables', 'DashboardTable')
Product = get_model('catalogue', 'Product')
Category = get_model('catalogue', 'Category')
class ProductTable(DashboardTable):
title = TemplateColumn(
verbose_name=_('Title'),
template_name='dashboard/catalogue/product_row_title.html',
order_by='title', accessor=A('title'))
image = TemplateColumn(
verbose_name=_('Image'),
template_name='dashboard/catalogue/product_row_image.html',
orderable=False)
product_class = Column(
verbose_name=_('Product type'),
accessor=A('product_class'),
order_by='product_class__name')
variants = TemplateColumn(
verbose_name=_("Variants"),
template_name='dashboard/catalogue/product_row_variants.html',
orderable=False
)
stock_records = TemplateColumn(
verbose_name=_('Stock records'),
template_name='dashboard/catalogue/product_row_stockrecords.html',
orderable=False)
actions = TemplateColumn(
verbose_name=_('Actions'),
template_name='dashboard/catalogue/product_row_actions.html',
orderable=False)
icon = "sitemap"
class Meta(DashboardTable.Meta):
model = Product
fields = ('upc', 'date_updated')
sequence = ('title', 'upc', 'image', 'product_class', 'variants',
'stock_records', '...', 'date_updated', 'actions')
order_by = '-date_updated'
class CategoryTable(DashboardTable):
name = LinkColumn('dashboard:catalogue-category-update', args=[A('pk')])
description = TemplateColumn(
template_code='{{ record.description|default:""|striptags'
'|cut:" "|truncatewords:6 }}')
# mark_safe is needed because of
# https://github.com/bradleyayers/django-tables2/issues/187
num_children = LinkColumn(
'dashboard:catalogue-category-detail-list', args=[A('pk')],
verbose_name=mark_safe(_('Number of child categories')),
accessor='get_num_children',
orderable=False)
actions = TemplateColumn(
template_name='dashboard/catalogue/category_row_actions.html',
orderable=False)
icon = "sitemap"
caption = ungettext_lazy("%s Category", "%s Categories")
class Meta(DashboardTable.Meta):
model = Category
fields = ('name', 'description')
|
bsd-3-clause
|
Meriipu/quodlibet
|
tests/plugin/test_console.py
|
4
|
1050
|
# Copyright 2017 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk
from quodlibet.formats import AudioFile
from quodlibet.util.songwrapper import SongWrapper
from tests.plugin import PluginTestCase
AUDIO_FILE = SongWrapper(AudioFile({'~filename': "/tmp/foobar"}))
class TConsole(PluginTestCase):
def setUp(self):
self.mod = self.modules["Python Console Sidebar"]
def tearDown(self):
del self.mod
def test_sidebar_plugin(self):
plugin = self.mod.PyConsoleSidebar()
plugin.enabled()
self.failUnless(isinstance(plugin.create_sidebar(), Gtk.Widget), True)
plugin.plugin_on_songs_selected([AUDIO_FILE])
self.failUnlessEqual(plugin.console.namespace.get('songs'),
[AUDIO_FILE])
plugin.disabled()
|
gpl-2.0
|
CloudBotIRC/CloudBotLegacy
|
plugins/brainfuck.py
|
6
|
2477
|
"""brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py"""
import re
import random
from util import hook
BUFFER_SIZE = 5000
MAX_STEPS = 1000000
@hook.command('brainfuck')
@hook.command
def bf(inp):
"""bf <prog> -- Executes <prog> as Brainfuck code."""
program = re.sub('[^][<>+-.,]', '', inp)
    # create a dict of bracket pairs, for speed later on
brackets = {}
open_brackets = []
for pos in range(len(program)):
if program[pos] == '[':
open_brackets.append(pos)
elif program[pos] == ']':
if len(open_brackets) > 0:
brackets[pos] = open_brackets[-1]
brackets[open_brackets[-1]] = pos
open_brackets.pop()
else:
return 'unbalanced brackets'
if len(open_brackets) != 0:
return 'unbalanced brackets'
# now we can start interpreting
ip = 0 # instruction pointer
mp = 0 # memory pointer
steps = 0
memory = [0] * BUFFER_SIZE # initial memory area
rightmost = 0
output = "" # we'll save the output here
# the main program loop:
while ip < len(program):
c = program[ip]
if c == '+':
            memory[mp] = (memory[mp] + 1) % 256  # wrap cell value at 256
elif c == '-':
            memory[mp] = (memory[mp] - 1) % 256  # wrap below 0 back to 255
elif c == '>':
mp += 1
if mp > rightmost:
rightmost = mp
if mp >= len(memory):
# no restriction on memory growth!
memory.extend([0] * BUFFER_SIZE)
elif c == '<':
            mp = (mp - 1) % len(memory)  # wrap the pointer instead of going negative
elif c == '.':
output += chr(memory[mp])
if len(output) > 500:
break
elif c == ',':
memory[mp] = random.randint(1, 255)
elif c == '[':
if memory[mp] == 0:
ip = brackets[ip]
elif c == ']':
if memory[mp] != 0:
ip = brackets[ip]
ip += 1
steps += 1
if steps > MAX_STEPS:
if output == '':
output = '(no output)'
output += '[exceeded {} iterations]'.format(MAX_STEPS)
break
stripped_output = re.sub(r'[\x00-\x1F]', '', output)
if stripped_output == '':
if output != '':
return 'no printable output'
return 'no output'
return stripped_output[:430].decode('utf8', 'ignore')
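# Illustrative example (assuming "." is the bot's command prefix; not part of
# the original plugin): the command
#     .bf ++++++++[>++++++++<-]>+.
# builds 65 in the second memory cell and prints it, so the plugin replies "A".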
|
gpl-3.0
|
tcarmelveilleux/IcarusAltimeter
|
Analysis/altitude_analysis.py
|
1
|
1202
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 19:34:31 2015
@author: Tennessee
"""
import numpy as np
import matplotlib.pyplot as plt
def altitude(atm_hpa, sea_level_hpa):
return 44330 * (1.0 - np.power(atm_hpa / sea_level_hpa, 0.1903))
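# Illustrative check (not in the original source): with a standard sea-level
# pressure of 1013.0 hPa, a station reading of 900.0 hPa gives
#     44330 * (1.0 - (900.0 / 1013.0) ** 0.1903) ~= 987 m
# i.e. altitude(900.0, 1013.0) returns roughly 987.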
def plot_alt():
default_msl = 101300.0
pressure = np.linspace(97772.58 / 100.0, 79495.0 / 100.0, 2000)
alt_nominal = altitude(pressure, default_msl) - altitude(97772.58 / 100.0, default_msl)
alt_too_high = altitude(pressure, default_msl + (1000 / 100.0)) - altitude(97772.58 / 100.0, default_msl + (1000 / 100.0))
alt_too_low = altitude(pressure, default_msl - (1000 / 100.0)) - altitude(97772.58 / 100.0, default_msl - (1000 / 100.0))
f1 = plt.figure()
ax = f1.gca()
ax.plot(pressure, alt_nominal, "b-", label="nom")
ax.plot(pressure, alt_too_high, "r-", label="high")
ax.plot(pressure, alt_too_low, "g-", label="low")
ax.legend()
f1.show()
f2 = plt.figure()
ax = f2.gca()
ax.plot(pressure, alt_too_high - alt_nominal, "r-", label="high")
ax.plot(pressure, alt_too_low - alt_nominal, "g-", label="low")
ax.legend()
f2.show()
|
mit
|
endlessm/chromium-browser
|
third_party/catapult/third_party/typ/typ/tests/stats_test.py
|
84
|
2292
|
# Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typ.stats import Stats
class TestStats(unittest.TestCase):
def test_basic(self):
s = Stats('foo', lambda: 0, 32)
self.assertEqual(s.format(), 'foo')
def test_edges(self):
s = Stats('[%s/%f/%t/%r/%p]', lambda: 0, 32)
self.assertEqual(s.format(), '[0/0/0/0/-]')
s.started = 3
s.total = 5
s.finished = 1
self.assertEqual(s.format(), '[3/1/5/2/ 60.0]')
s.started = 5
s.finished = 5
self.assertEqual(s.format(), '[5/5/5/0/100.0]')
def test_elapsed_time(self):
times = [0.0, 0.4]
s = Stats('[%e]', lambda: times.pop(0), 32)
self.assertEqual(s.format(), '[0.400]')
s = Stats('[%e]', lambda: 0, 32)
self.assertEqual(s.format(), '[0.000]')
def test_current_rate(self):
times = [0.0, 0.1, 0.2]
s = Stats('[%c]', lambda: times.pop(0), 1)
self.assertEquals(s.format(), '[-]')
s.add_time()
s.add_time()
self.assertEquals(s.format(), '[ 10.0]')
def test_overall_rate(self):
times = [0, 0, 5]
s = Stats('[%o]', lambda: times.pop(0), 32)
self.assertEqual(s.format(), '[-]')
s.started = 3
s.finished = 1
s.total = 5
self.assertEqual(s.format(), '[ 0.2]')
def test_escaped_percent(self):
s = Stats('%%', lambda: 0, 32)
self.assertEqual(s.format(), '%')
def test_unrecognized_escape(self):
s = Stats('%x', lambda: 0, 32)
self.assertEqual(s.format(), '%x')
def test_remaining(self):
s = Stats('%u', lambda: 0, 32)
s.total = 2
self.assertEqual(s.format(), '2')
|
bsd-3-clause
|
clef/python-social-auth
|
social/tests/backends/test_azuread.py
|
58
|
3180
|
"""
Copyright (c) 2015 Microsoft Open Technologies, Inc.
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
from social.tests.backends.oauth import OAuth2Test
class AzureADOAuth2Test(OAuth2Test):
backend_path = 'social.backends.azuread.AzureADOAuth2'
user_data_url = 'https://graph.windows.net/me'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer',
'id_token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJodHRwczovL'
'3N0cy53aW5kb3dzLm5ldC83Mjc0MDZhYy03MDY4LTQ4ZmEtOTJiOS1jMmQ'
'2NzIxMWJjNTAvIiwiaWF0IjpudWxsLCJleHAiOm51bGwsImF1ZCI6IjAyO'
'WNjMDEwLWJiNzQtNGQyYi1hMDQwLWY5Y2VkM2ZkMmM3NiIsInN1YiI6In'
'FVOHhrczltSHFuVjZRMzR6aDdTQVpvY2loOUV6cnJJOW1wVlhPSWJWQTg'
'iLCJ2ZXIiOiIxLjAiLCJ0aWQiOiI3Mjc0MDZhYy03MDY4LTQ4ZmEtOTJi'
'OS1jMmQ2NzIxMWJjNTAiLCJvaWQiOiI3ZjhlMTk2OS04YjgxLTQzOGMtO'
'GQ0ZS1hZDZmNTYyYjI4YmIiLCJ1cG4iOiJmb29iYXJAdGVzdC5vbm1pY3'
'Jvc29mdC5jb20iLCJnaXZlbl9uYW1lIjoiZm9vIiwiZmFtaWx5X25hbWU'
'iOiJiYXIiLCJuYW1lIjoiZm9vIGJhciIsInVuaXF1ZV9uYW1lIjoiZm9v'
'YmFyQHRlc3Qub25taWNyb3NvZnQuY29tIiwicHdkX2V4cCI6IjQ3MzMwO'
'TY4IiwicHdkX3VybCI6Imh0dHBzOi8vcG9ydGFsLm1pY3Jvc29mdG9ubG'
'luZS5jb20vQ2hhbmdlUGFzc3dvcmQuYXNweCJ9.3V50dHXTZOHj9UWtkn'
'2g7BjX5JxNe8skYlK4PdhiLz4',
'expires_in': 3600,
'expires_on': 1423650396,
'not_before': 1423646496
})
refresh_token_body = json.dumps({
'access_token': 'foobar-new-token',
'token_type': 'bearer',
'expires_in': 3600,
'refresh_token': 'foobar-new-refresh-token',
'scope': 'identity'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
def test_refresh_token(self):
user, social = self.do_refresh_token()
self.assertEqual(social.extra_data['access_token'], 'foobar-new-token')
|
bsd-3-clause
|
SmartPeople/zulip
|
zerver/webhooks/zapier/view.py
|
14
|
1068
|
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from typing import Any, Callable, Dict
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
@api_key_only_webhook_view('Zapier')
@has_request_variables
def api_zapier_webhook(request, user_profile, client,
payload=REQ(argument_type='body'),
stream=REQ(default='zapier')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], str) -> HttpResponse
subject = payload.get('subject')
content = payload.get('content')
if subject is None:
return json_error(_("Subject can't be empty"))
if content is None:
return json_error(_("Content can't be empty"))
check_send_message(user_profile, client, "stream", [stream], subject, content)
return json_success()
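# Illustrative payload (hypothetical values): POSTing
#     {"subject": "Build finished", "content": "All tests passed."}
# to this endpoint relays that message to the "zapier" stream, unless a
# different stream is passed as a request variable.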
|
apache-2.0
|
tokenly/counterparty-lib
|
counterpartylib/test/conftest.py
|
2
|
5555
|
#! /usr/bin/python3
"""
Test suite configuration
"""
import os
import sys
current_dir = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
base_dir = os.path.normpath(os.path.join(current_dir, '../../'))
sys.path.insert(0, base_dir)
import json, binascii, apsw
from datetime import datetime
import pytest, util_test
from fixtures.vectors import UNITTEST_VECTOR
from fixtures.params import DEFAULT_PARAMS
from fixtures.scenarios import INTEGRATION_SCENARIOS
from counterpartylib.lib import config, util, backend, transaction
import bitcoin as bitcoinlib
import bitcoin.rpc as bitcoinlib_rpc
def pytest_collection_modifyitems(session, config, items):
"""Run contracts_test.py last."""
items[:] = list(reversed(items))
def pytest_generate_tests(metafunc):
"""Generate all py.test cases. Checks for different types of tests and creates proper context."""
if metafunc.function.__name__ == 'test_vector':
args = util_test.vector_to_args(UNITTEST_VECTOR, pytest.config.option.function)
metafunc.parametrize('tx_name, method, inputs, outputs, error, records', args)
elif metafunc.function.__name__ == 'test_scenario':
args = []
for scenario_name in INTEGRATION_SCENARIOS:
if pytest.config.option.scenario == [] or scenario_name in pytest.config.option.scenario:
args.append((scenario_name, INTEGRATION_SCENARIOS[scenario_name][1], INTEGRATION_SCENARIOS[scenario_name][0]))
metafunc.parametrize('scenario_name, base_scenario_name, transactions', args)
elif metafunc.function.__name__ == 'test_book':
if pytest.config.option.skiptestbook == 'all':
args = []
elif pytest.config.option.skiptestbook == 'testnet':
args = [False]
elif pytest.config.option.skiptestbook == 'mainnet':
args = [True]
else:
args = [True, False]
metafunc.parametrize('testnet', args)
def pytest_addoption(parser):
"""Add useful test suite argument options."""
parser.addoption("--function", action="append", default=[], help="list of functions to test")
parser.addoption("--scenario", action="append", default=[], help="list of scenarios to test")
parser.addoption("--gentxhex", action='store_true', default=False, help="generate and print unsigned hex for *.compose() tests")
parser.addoption("--savescenarios", action='store_true', default=False, help="generate sql dump and log in .new files")
parser.addoption("--skiptestbook", default='no', help="skip test book(s) (use with one of the following values: `all`, `testnet` or `mainnet`)")
@pytest.fixture(scope="module")
def rawtransactions_db(request):
"""Return a database object."""
db = apsw.Connection(util_test.CURR_DIR + '/fixtures/rawtransactions.db')
if (request.module.__name__ == 'integration_test'):
util_test.initialise_rawtransactions_db(db)
return db
@pytest.fixture(autouse=True)
def init_mock_functions(monkeypatch, rawtransactions_db):
"""Test suit mock functions.
Mock functions override default behaviour to allow test suit to work - for instance, date_passed is overwritten
so that every date will pass. Those are available to every test function in this suite."""
util_test.rawtransactions_db = rawtransactions_db
def get_unspent_txouts(address, unconfirmed=False, multisig_inputs=False):
with open(util_test.CURR_DIR + '/fixtures/unspent_outputs.json', 'r') as listunspent_test_file:
wallet_unspent = json.load(listunspent_test_file)
unspent_txouts = [output for output in wallet_unspent if output['address'] == address]
return unspent_txouts
def isodt(epoch_time):
return datetime.utcfromtimestamp(epoch_time).isoformat()
def curr_time():
return 0
def date_passed(date):
return False
def init_api_access_log():
pass
def pubkeyhash_to_pubkey(address, provided_pubkeys=None):
return DEFAULT_PARAMS['pubkey'][address]
def multisig_pubkeyhashes_to_pubkeys(address, provided_pubkeys=None):
# TODO: Should be updated?!
array = address.split('_')
signatures_required = int(array[0])
pubkeyhashes = array[1:-1]
pubkeys = [DEFAULT_PARAMS['pubkey'][pubkeyhash] for pubkeyhash in pubkeyhashes]
address = '_'.join([str(signatures_required)] + sorted(pubkeys) + [str(len(pubkeys))])
return address
def get_cached_raw_transaction(tx_hash, verbose=False):
return util_test.getrawtransaction(rawtransactions_db, bitcoinlib.core.lx(tx_hash))
util.CURRENT_BLOCK_INDEX = DEFAULT_PARAMS['default_block'] - 1
monkeypatch.setattr('counterpartylib.lib.backend.get_unspent_txouts', get_unspent_txouts)
monkeypatch.setattr('counterpartylib.lib.log.isodt', isodt)
monkeypatch.setattr('counterpartylib.lib.log.curr_time', curr_time)
monkeypatch.setattr('counterpartylib.lib.util.date_passed', date_passed)
monkeypatch.setattr('counterpartylib.lib.api.init_api_access_log', init_api_access_log)
if hasattr(config, 'PREFIX'):
monkeypatch.setattr('counterpartylib.lib.config.PREFIX', b'TESTXXXX')
monkeypatch.setattr('counterpartylib.lib.backend.getrawtransaction', get_cached_raw_transaction)
monkeypatch.setattr('counterpartylib.lib.backend.pubkeyhash_to_pubkey', pubkeyhash_to_pubkey)
monkeypatch.setattr('counterpartylib.lib.backend.multisig_pubkeyhashes_to_pubkeys', multisig_pubkeyhashes_to_pubkeys)
|
mit
|
aESeguridad/GERE
|
venv/lib/python2.7/site-packages/flask_weasyprint/__init__.py
|
1
|
7726
|
# coding: utf8
"""
flask_weasyprint
~~~~~~~~~~~~~~~~
Flask-WeasyPrint: Make PDF in your Flask app with WeasyPrint.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
import weasyprint
from flask import request, current_app
from werkzeug.test import Client, ClientRedirectError
from werkzeug.wrappers import Response
try:
import urlparse
except ImportError: # Python 3
from urllib import parse as urlparse
try:
unicode
except NameError: # Python 3
unicode = str
VERSION = '0.5'
__all__ = ['VERSION', 'make_flask_url_dispatcher', 'make_url_fetcher',
'HTML', 'CSS', 'render_pdf']
DEFAULT_PORTS = frozenset([('http', 80), ('https', 443)])
def make_flask_url_dispatcher():
"""Return an URL dispatcher based on the current :ref:`request context
<flask:request-context>`.
You generally don’t need to call this directly.
The context is used when the dispatcher is first created but not
afterwards. It is not required after this function has returned.
Dispatch to the context’s app URLs below the context’s root URL.
If the app has a ``SERVER_NAME`` :ref:`config <flask:config>`, also
accept URLs that have that domain name or a subdomain thereof.
"""
def parse_netloc(netloc):
"""Return (hostname, port)."""
parsed = urlparse.urlsplit('http://' + netloc)
return parsed.hostname, parsed.port
app = current_app._get_current_object()
root_path = request.script_root
server_name = app.config.get('SERVER_NAME')
if server_name:
hostname, port = parse_netloc(server_name)
def accept(url):
"""Accept any URL scheme; also accept subdomains."""
return url.hostname is not None and (
url.hostname == hostname or
url.hostname.endswith('.' + hostname))
else:
scheme = request.scheme
hostname, port = parse_netloc(request.host)
if (scheme, port) in DEFAULT_PORTS:
port = None
def accept(url):
"""Do not accept subdomains."""
return (url.scheme, url.hostname) == (scheme, hostname)
def dispatch(url_string):
if isinstance(url_string, bytes):
url_string = url_string.decode('utf8')
url = urlparse.urlsplit(url_string)
url_port = url.port
if (url.scheme, url_port) in DEFAULT_PORTS:
url_port = None
if accept(url) and url_port == port and url.path.startswith(root_path):
netloc = url.netloc
if url.port and not url_port:
netloc = netloc.rsplit(':', 1)[0] # remove default port
base_url = '%s://%s%s' % (url.scheme, netloc, root_path)
path = url.path[len(root_path):]
if url.query:
path += '?' + url.query
# Ignore url.fragment
return app, base_url, path
return dispatch
def make_url_fetcher(dispatcher=None,
next_fetcher=weasyprint.default_url_fetcher):
"""Return an function suitable as a ``url_fetcher`` in WeasyPrint.
You generally don’t need to call this directly.
If ``dispatcher`` is not provided, :func:`make_flask_url_dispatcher`
is called to get one. This requires a request context.
    Otherwise, it must be a callable that takes an URL and returns either
    ``None`` or a ``(wsgi_callable, base_url, path)`` tuple. For ``None``,
    ``next_fetcher`` is used. (By default, fetch normally over the network.)
For a tuple the request is made at the WSGI level.
``wsgi_callable`` must be a Flask application or another WSGI callable.
``base_url`` is the root URL for the application while ``path``
is the path within the application.
Typically ``base_url + path`` is equal or equivalent to the passed URL.
"""
if dispatcher is None:
dispatcher = make_flask_url_dispatcher()
def flask_url_fetcher(url):
redirect_chain = set()
while 1:
result = dispatcher(url)
if result is None:
return next_fetcher(url)
app, base_url, path = result
client = Client(app, response_wrapper=Response)
if isinstance(path, unicode):
                # TODO: double-check this. Apparently Werkzeug %-unquotes bytes
# but not Unicode URLs. (IRI vs. URI or something.)
path = path.encode('utf8')
response = client.get(path, base_url=base_url)
if response.status_code == 200:
return dict(
string=response.data,
mime_type=response.mimetype,
encoding=response.charset,
redirected_url=url)
# The test client can follow redirects, but do it ourselves
# to get access to the redirected URL.
elif response.status_code in (301, 302, 303, 305, 307):
redirect_chain.add(url)
url = response.location
if url in redirect_chain:
raise ClientRedirectError('loop detected')
else:
raise ValueError('Flask-WeasyPrint got HTTP status %s for %s%s'
% (response.status, base_url, path))
return flask_url_fetcher
def _wrapper(class_, *args, **kwargs):
if args:
guess = args[0]
args = args[1:]
else:
guess = kwargs.pop('guess', None)
if guess is not None and not hasattr(guess, 'read'):
# Assume a (possibly relative) URL
guess = urlparse.urljoin(request.url, guess)
if 'string' in kwargs and 'base_url' not in kwargs:
# Strings do not have an "intrinsic" base URL, use the request context.
kwargs['base_url'] = request.url
kwargs['url_fetcher'] = make_url_fetcher()
return class_(guess, *args, **kwargs)
def HTML(*args, **kwargs):
"""Like `weasyprint.HTML()
<http://weasyprint.org/using/#the-weasyprint-html-class>`_ but:
* :func:`make_url_fetcher` is used to create an ``url_fetcher``
* If ``guess`` is not a file object, it is an URL relative to the current
request context.
This means that you can just pass a result from :func:`flask.url_for`.
* If ``string`` is passed, ``base_url`` defaults to the current
request’s URL.
This requires a Flask request context.
"""
return _wrapper(weasyprint.HTML, *args, **kwargs)
def CSS(*args, **kwargs):
return _wrapper(weasyprint.CSS, *args, **kwargs)
CSS.__doc__ = HTML.__doc__.replace('HTML', 'CSS').replace('html', 'css')
def render_pdf(html, stylesheets=None, download_filename=None):
"""Render a PDF to a response with the correct ``Content-Type`` header.
:param html:
Either a :class:`weasyprint.HTML` object or an URL to be passed
to :func:`flask_weasyprint.HTML`. The latter case requires
a request context.
:param stylesheets:
A list of user stylesheets, passed to
:meth:`~weasyprint.HTML.write_pdf`
:param download_filename:
If provided, the ``Content-Disposition`` header is set so that most
web browser will show the "Save as…" dialog with the value as the
default filename.
:returns: a :class:`flask.Response` object.
"""
if not hasattr(html, 'write_pdf'):
html = HTML(html)
pdf = html.write_pdf(stylesheets=stylesheets)
response = current_app.response_class(pdf, mimetype='application/pdf')
if download_filename:
response.headers.add('Content-Disposition', 'attachment',
filename=download_filename)
return response
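# A minimal usage sketch (the route names and markup below are illustrative,
# not part of this module): render an existing HTML view as a PDF download.
if __name__ == '__main__':
    from flask import Flask, url_for
    app = Flask(__name__)
    @app.route('/hello.html')
    def hello_html():
        return '<h1>Hello, PDF!</h1>'
    @app.route('/hello.pdf')
    def hello_pdf():
        # HTML() resolves the URL relative to the current request context.
        return render_pdf(HTML(url_for('hello_html')),
                          download_filename='hello.pdf')
    app.run()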
|
gpl-3.0
|
pombredanne/numba
|
numba/cuda/tests/cudapy/test_gufunc.py
|
2
|
10779
|
from __future__ import print_function, absolute_import
from numba import void, float32, float64
import numpy as np
import numpy.core.umath_tests as ut
from numba import guvectorize
from numba import cuda
from timeit import default_timer as time
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
non_stream_speedups = []
stream_speedups = []
@skip_on_cudasim('ufunc API unsupported in the simulator')
class TestCUDAGufunc(unittest.TestCase):
def test_gufunc_small(self):
@guvectorize([void(float32[:, :], float32[:, :], float32[:, :])],
'(m,n),(n,p)->(m,p)',
target='cuda')
def matmulcore(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
gufunc = matmulcore
gufunc.max_blocksize = 512
matrix_ct = 2
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
5)
ts = time()
C = gufunc(A, B)
tcuda = time() - ts
ts = time()
Gold = ut.matrix_multiply(A, B)
tcpu = time() - ts
non_stream_speedups.append(tcpu / tcuda)
print(C, Gold)
self.assertTrue(np.allclose(C, Gold))
def test_gufunc_auto_transfer(self):
@guvectorize([void(float32[:, :], float32[:, :], float32[:, :])],
'(m,n),(n,p)->(m,p)',
target='cuda')
def matmulcore(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
gufunc = matmulcore
gufunc.max_blocksize = 512
matrix_ct = 2
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
5)
dB = cuda.to_device(B)
ts = time()
C = gufunc(A, dB).copy_to_host()
tcuda = time() - ts
ts = time()
Gold = ut.matrix_multiply(A, B)
tcpu = time() - ts
non_stream_speedups.append(tcpu / tcuda)
print(C, Gold)
self.assertTrue(np.allclose(C, Gold))
def test_gufunc(self):
@guvectorize([void(float32[:, :], float32[:, :], float32[:, :])],
'(m,n),(n,p)->(m,p)',
target='cuda')
def matmulcore(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
gufunc = matmulcore
gufunc.max_blocksize = 512
matrix_ct = 1001 # an odd number to test thread/block division in CUDA
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
5)
ts = time()
C = gufunc(A, B)
tcuda = time() - ts
ts = time()
Gold = ut.matrix_multiply(A, B)
tcpu = time() - ts
non_stream_speedups.append(tcpu / tcuda)
self.assertTrue(np.allclose(C, Gold))
def test_gufunc_hidim(self):
@guvectorize([void(float32[:, :], float32[:, :], float32[:, :])],
'(m,n),(n,p)->(m,p)',
target='cuda')
def matmulcore(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
gufunc = matmulcore
gufunc.max_blocksize = 512
        matrix_ct = 100  # total number of matrices; reshaped below into a (4, 25, ...) batch
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(4, 25, 2, 4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(4, 25, 4, 5)
ts = time()
C = gufunc(A, B)
tcuda = time() - ts
ts = time()
Gold = ut.matrix_multiply(A, B)
tcpu = time() - ts
non_stream_speedups.append(tcpu / tcuda)
self.assertTrue(np.allclose(C, Gold))
def test_gufunc_new_axis(self):
@guvectorize([void(float64[:, :], float64[:, :], float64[:, :])],
'(m,n),(n,p)->(m,p)',
target='cuda')
def matmulcore(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
gufunc = matmulcore
X = np.random.randn(10, 3, 3)
Y = np.random.randn(3, 3)
gold = ut.matrix_multiply(X, Y)
res1 = gufunc(X, Y)
np.testing.assert_allclose(gold, res1)
res2 = gufunc(X, np.tile(Y, (10, 1, 1)))
np.testing.assert_allclose(gold, res2)
def test_gufunc_adjust_blocksize(self):
@guvectorize([void(float32[:, :], float32[:, :], float32[:, :])],
'(m,n),(n,p)->(m,p)',
target='cuda')
def matmulcore(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
gufunc = matmulcore
gufunc.max_blocksize = 512
matrix_ct = 1001 # an odd number to test thread/block division in CUDA
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
5)
gufunc.max_blocksize = 32
C = gufunc(A, B)
Gold = ut.matrix_multiply(A, B)
self.assertTrue(np.allclose(C, Gold))
def test_gufunc_stream(self):
@guvectorize([void(float32[:, :], float32[:, :], float32[:, :])],
'(m,n),(n,p)->(m,p)',
target='cuda')
def matmulcore(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
gufunc = matmulcore
gufunc.max_blocksize = 512
#cuda.driver.flush_pending_free()
matrix_ct = 1001 # an odd number to test thread/block division in CUDA
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
5)
ts = time()
stream = cuda.stream()
dA = cuda.to_device(A, stream)
dB = cuda.to_device(B, stream)
dC = cuda.device_array(shape=(1001, 2, 5), dtype=A.dtype, stream=stream)
dC = gufunc(dA, dB, out=dC, stream=stream)
C = dC.copy_to_host(stream=stream)
stream.synchronize()
tcuda = time() - ts
ts = time()
Gold = ut.matrix_multiply(A, B)
tcpu = time() - ts
stream_speedups.append(tcpu / tcuda)
self.assertTrue(np.allclose(C, Gold))
def test_copy(self):
@guvectorize([void(float32[:], float32[:])],
'(x)->(x)',
target='cuda')
def copy(A, B):
for i in range(B.size):
B[i] = A[i]
A = np.arange(10, dtype=np.float32) + 1
B = np.zeros_like(A)
copy(A, out=B)
self.assertTrue(np.allclose(A, B))
def test_copy_odd(self):
@guvectorize([void(float32[:], float32[:])],
'(x)->(x)',
target='cuda')
def copy(A, B):
for i in range(B.size):
B[i] = A[i]
A = np.arange(11, dtype=np.float32) + 1
B = np.zeros_like(A)
copy(A, out=B)
self.assertTrue(np.allclose(A, B))
def test_copy2d(self):
@guvectorize([void(float32[:, :], float32[:, :])],
'(x, y)->(x, y)',
target='cuda')
def copy2d(A, B):
for x in range(B.shape[0]):
for y in range(B.shape[1]):
B[x, y] = A[x, y]
A = np.arange(30, dtype=np.float32).reshape(5, 6) + 1
B = np.zeros_like(A)
copy2d(A, out=B)
self.assertTrue(np.allclose(A, B))
def test_nopython_flag(self):
def foo(A, B):
pass
# nopython = True is fine
guvectorize([void(float32[:], float32[:])], '(x)->(x)', target='cuda',
nopython=True)(foo)
# nopython = False is bad
with self.assertRaises(TypeError) as raises:
guvectorize([void(float32[:], float32[:])], '(x)->(x)',
target='cuda', nopython=False)(foo)
self.assertEqual("nopython flag must be True", str(raises.exception))
def test_invalid_flags(self):
# Check invalid flags
def foo(A, B):
pass
with self.assertRaises(TypeError) as raises:
guvectorize([void(float32[:], float32[:])], '(x)->(x)',
target='cuda', what1=True, ever2=False)(foo)
head = "The following target options are not supported:"
msg = str(raises.exception)
self.assertEqual(msg[:len(head)], head)
items = msg[len(head):].strip().split(',')
items = [i.strip("'\" ") for i in items]
self.assertEqual(set(['what1', 'ever2']), set(items))
if __name__ == '__main__':
unittest.main()
|
bsd-2-clause
|
Samuel789/MediPi
|
MedManagementWeb/env/lib/python3.5/site-packages/django/contrib/gis/gdal/datasource.py
|
46
|
4682
|
"""
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
            # Returns the value of the field; OFTInteger returns ints,
            # OFTReal returns floats, all else returns strings.
val = field.value
"""
from ctypes import byref
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.layer import Layer
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
"Wraps an OGR Data Source object."
destructor = capi.destroy_ds
def __init__(self, ds_input, ds_driver=False, write=False, encoding='utf-8'):
# The write flag.
if write:
self._write = 1
else:
self._write = 0
# See also http://trac.osgeo.org/gdal/wiki/rfc23_ogr_unicode
self.encoding = encoding
Driver.ensure_registered()
if isinstance(ds_input, six.string_types):
# The data source driver is a void pointer.
ds_driver = Driver.ptr_type()
try:
# OGROpen will auto-detect the data source type.
ds = capi.open_ds(force_bytes(ds_input), self._write, byref(ds_driver))
except GDALException:
# Making the error message more clear rather than something
# like "Invalid pointer returned from OGROpen".
raise GDALException('Could not open the datasource at "%s"' % ds_input)
elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
ds = ds_input
else:
raise GDALException('Invalid data source input type: %s' % type(ds_input))
if ds:
self.ptr = ds
self.driver = Driver(ds_driver)
else:
# Raise an exception if the returned pointer is NULL
raise GDALException('Invalid data source file "%s"' % ds_input)
def __iter__(self):
"Allows for iteration over the layers in a data source."
for i in range(self.layer_count):
yield self[i]
def __getitem__(self, index):
"Allows use of the index [] operator to get a layer at the index."
if isinstance(index, six.string_types):
layer = capi.get_layer_by_name(self.ptr, force_bytes(index))
if not layer:
raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
elif isinstance(index, int):
if index < 0 or index >= self.layer_count:
raise OGRIndexError('index out of range')
layer = capi.get_layer(self._ptr, index)
else:
raise TypeError('Invalid index type: %s' % type(index))
return Layer(layer, self)
def __len__(self):
"Returns the number of layers within the data source."
return self.layer_count
def __str__(self):
"Returns OGR GetName and Driver for the Data Source."
return '%s (%s)' % (self.name, str(self.driver))
@property
def layer_count(self):
"Returns the number of layers in the data source."
return capi.get_layer_count(self._ptr)
@property
def name(self):
"Returns the name of the data source."
name = capi.get_ds_name(self._ptr)
return force_text(name, self.encoding, strings_only=True)
|
apache-2.0
|
jannishuebl/electron
|
script/bump-version.py
|
135
|
3506
|
#!/usr/bin/env python
import os
import re
import sys
from lib.util import execute, get_atom_shell_version, parse_version, scoped_cwd
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
if len(sys.argv) != 2 or sys.argv[1] == '-h':
print 'Usage: bump-version.py [<version> | major | minor | patch]'
return 1
option = sys.argv[1]
increments = ['major', 'minor', 'patch', 'build']
if option in increments:
version = get_atom_shell_version()
versions = parse_version(version.split('-')[0])
versions = increase_version(versions, increments.index(option))
else:
versions = parse_version(option)
version = '.'.join(versions[:3])
with scoped_cwd(SOURCE_ROOT):
update_atom_gyp(version)
update_win_rc(version, versions)
update_version_h(versions)
update_info_plist(version)
tag_version(version)
def increase_version(versions, index):
for i in range(index + 1, 4):
versions[i] = '0'
versions[index] = str(int(versions[index]) + 1)
return versions
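# Worked example: bumping the component at index 1 ("minor") zeroes everything
# after it, e.g. increase_version(['1', '2', '3', '4'], 1) -> ['1', '3', '0', '0'].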
def update_atom_gyp(version):
pattern = re.compile(" *'version%' *: *'[0-9.]+'")
with open('atom.gyp', 'r') as f:
lines = f.readlines()
for i in range(0, len(lines)):
if pattern.match(lines[i]):
lines[i] = " 'version%': '{0}',\n".format(version)
with open('atom.gyp', 'w') as f:
f.write(''.join(lines))
return
def update_win_rc(version, versions):
pattern_fv = re.compile(' FILEVERSION [0-9,]+')
pattern_pv = re.compile(' PRODUCTVERSION [0-9,]+')
pattern_fvs = re.compile(' *VALUE "FileVersion", "[0-9.]+"')
pattern_pvs = re.compile(' *VALUE "ProductVersion", "[0-9.]+"')
win_rc = os.path.join('atom', 'browser', 'resources', 'win', 'atom.rc')
with open(win_rc, 'r') as f:
lines = f.readlines()
versions = [str(v) for v in versions]
for i in range(0, len(lines)):
line = lines[i]
if pattern_fv.match(line):
lines[i] = ' FILEVERSION {0}\r\n'.format(','.join(versions))
elif pattern_pv.match(line):
lines[i] = ' PRODUCTVERSION {0}\r\n'.format(','.join(versions))
elif pattern_fvs.match(line):
lines[i] = ' VALUE "FileVersion", "{0}"\r\n'.format(version)
elif pattern_pvs.match(line):
lines[i] = ' VALUE "ProductVersion", "{0}"\r\n'.format(version)
with open(win_rc, 'w') as f:
f.write(''.join(lines))
def update_version_h(versions):
version_h = os.path.join('atom', 'common', 'atom_version.h')
with open(version_h, 'r') as f:
lines = f.readlines()
for i in range(0, len(lines)):
line = lines[i]
if 'ATOM_MAJOR_VERSION' in line:
lines[i] = '#define ATOM_MAJOR_VERSION {0}\n'.format(versions[0])
lines[i + 1] = '#define ATOM_MINOR_VERSION {0}\n'.format(versions[1])
lines[i + 2] = '#define ATOM_PATCH_VERSION {0}\n'.format(versions[2])
with open(version_h, 'w') as f:
f.write(''.join(lines))
return
def update_info_plist(version):
info_plist = os.path.join('atom', 'browser', 'resources', 'mac', 'Info.plist')
with open(info_plist, 'r') as f:
lines = f.readlines()
for i in range(0, len(lines)):
line = lines[i]
if 'CFBundleVersion' in line:
lines[i + 1] = ' <string>{0}</string>\n'.format(version)
with open(info_plist, 'w') as f:
f.write(''.join(lines))
return
def tag_version(version):
execute(['git', 'commit', '-a', '-m', 'Bump v{0}'.format(version)])
if __name__ == '__main__':
sys.exit(main())
|
mit
|
olivierayache/xuggle-xuggler
|
captive/libx264/csrc/tools/digress/cli.py
|
145
|
4413
|
"""
Digress's CLI interface.
"""
import inspect
import sys
from optparse import OptionParser
import textwrap
from types import MethodType
from digress import __version__ as version
def dispatchable(func):
"""
Mark a method as dispatchable.
"""
func.digress_dispatchable = True
return func
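# Hypothetical sketch (the class and method names are illustrative only): any
# method carrying the digress_dispatchable marker becomes a CLI command once
# its fixture is handed to the Dispatcher below.
#
#     class MyFixture(object):
#         cases = []
#         @dispatchable
#         def bisect(self, good, bad=None):
#             """Bisect revisions between good and bad."""
#
#     Dispatcher(MyFixture()).dispatch()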
class Dispatcher(object):
"""
Dispatcher for CLI commands.
"""
def __init__(self, fixture):
self.fixture = fixture
fixture.dispatcher = self
def _monkey_print_help(self, optparse, *args, **kwargs):
# monkey patches OptionParser._print_help
OptionParser.print_help(optparse, *args, **kwargs)
print >>sys.stderr, "\nAvailable commands:"
maxlen = max([ len(command_name) for command_name in self.commands ])
descwidth = 80 - maxlen - 4
for command_name, command_meth in self.commands.iteritems():
print >>sys.stderr, " %s %s\n" % (
command_name.ljust(maxlen + 1),
("\n" + (maxlen + 4) * " ").join(
textwrap.wrap(" ".join(filter(
None,
command_meth.__doc__.strip().replace("\n", " ").split(" ")
)),
descwidth
)
)
)
def _enable_flush(self):
self.fixture.flush_before = True
def _populate_parser(self):
self.commands = self._get_commands()
self.optparse = OptionParser(
usage = "usage: %prog [options] command [args]",
description = "Digress CLI frontend for %s." % self.fixture.__class__.__name__,
version = "Digress %s" % version
)
self.optparse.print_help = MethodType(self._monkey_print_help, self.optparse, OptionParser)
self.optparse.add_option(
"-f",
"--flush",
action="callback",
callback=lambda option, opt, value, parser: self._enable_flush(),
help="flush existing data for a revision before testing"
)
self.optparse.add_option(
"-c",
"--cases",
metavar="FOO,BAR",
action="callback",
dest="cases",
type=str,
callback=lambda option, opt, value, parser: self._select_cases(*value.split(",")),
help="test cases to run, run with command list to see full list"
)
def _select_cases(self, *cases):
self.fixture.cases = filter(lambda case: case.__name__ in cases, self.fixture.cases)
def _get_commands(self):
commands = {}
for name, member in inspect.getmembers(self.fixture):
if hasattr(member, "digress_dispatchable"):
commands[name] = member
return commands
def _run_command(self, name, *args):
if name not in self.commands:
print >>sys.stderr, "error: %s is not a valid command\n" % name
self.optparse.print_help()
return
command = self.commands[name]
argspec = inspect.getargspec(command)
max_arg_len = len(argspec.args) - 1
min_arg_len = max_arg_len - ((argspec.defaults is not None) and len(argspec.defaults) or 0)
if len(args) < min_arg_len:
print >>sys.stderr, "error: %s takes at least %d arguments\n" % (
name,
min_arg_len
)
print >>sys.stderr, "%s\n" % command.__doc__
self.optparse.print_help()
return
if len(args) > max_arg_len:
print >>sys.stderr, "error: %s takes at most %d arguments\n" % (
name,
max_arg_len
)
print >>sys.stderr, "%s\n" % command.__doc__
self.optparse.print_help()
return
command(*args)
def pre_dispatch(self):
pass
def dispatch(self):
self._populate_parser()
self.optparse.parse_args()
self.pre_dispatch()
args = self.optparse.parse_args()[1] # arguments may require reparsing after pre_dispatch; see test_x264.py
if len(args) == 0:
print >>sys.stderr, "error: no comamnd specified\n"
self.optparse.print_help()
return
command = args[0]
addenda = args[1:]
self._run_command(command, *addenda)
|
lgpl-3.0
|
MiyamotoAkira/kivy
|
examples/canvas/repeat_texture.py
|
40
|
1597
|
'''
Repeat Texture on Resize
========================
This example repeats the letter 'K' (mtexture1.png) 64 times in a window.
You should see 8 rows and 8 columns of white K letters, along with a label
showing the current size. As you resize the window, the grid stays 8x8.
This example includes a label with a colored background.
Note the image mtexture1.png is a white 'K' on a transparent background, which
makes it hard to see.
'''
from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, ListProperty
from kivy.lang import Builder
kv = '''
<LabelOnBackground>:
canvas.before:
Color:
rgb: self.background
Rectangle:
pos: self.pos
size: self.size
FloatLayout:
canvas.before:
Color:
rgb: 1, 1, 1
Rectangle:
pos: self.pos
size: self.size
texture: app.texture
LabelOnBackground:
text: '{} (try to resize the window)'.format(root.size)
color: (0.4, 1, 1, 1)
background: (.3, .3, .3)
pos_hint: {'center_x': .5, 'center_y': .5 }
size_hint: None, None
height: 30
width: 250
'''
class LabelOnBackground(Label):
background = ListProperty((0.2, 0.2, 0.2))
class RepeatTexture(App):
texture = ObjectProperty()
def build(self):
self.texture = Image(source='mtexture1.png').texture
self.texture.wrap = 'repeat'
self.texture.uvsize = (8, 8)
return Builder.load_string(kv)
RepeatTexture().run()
|
mit
|
marty331/jakesclock
|
flask/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/pyodbc.py
|
59
|
2665
|
# mysql/pyodbc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
.. note:: The PyODBC for MySQL dialect is not well supported, and
is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25).
Other dialects for MySQL are recommended.
"""
from .base import MySQLDialect, MySQLExecutionContext
from ...connectors.pyodbc import PyODBCConnector
from ... import util
import re
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
supports_unicode_statements = False
execution_ctx_cls = MySQLExecutionContext_pyodbc
pyodbc_driver_name = "MySQL"
def __init__(self, **kw):
# deal with http://code.google.com/p/pyodbc/issues/detail?id=25
kw.setdefault('convert_unicode', True)
super(MySQLDialect_pyodbc, self).__init__(**kw)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
for key in ('character_set_connection', 'character_set'):
if opts.get(key, None):
return opts[key]
util.warn("Could not detect the connection character set. "
"Assuming latin1.")
return 'latin1'
    def _extract_error_code(self, exception):
        # search() returns None when the message carries no "(NNNN)" error
        # code, so guard against that before reading the match group.
        m = re.compile(r"\((\d+)\)").search(str(exception.args))
        if m:
            return int(m.group(1))
        return None
dialect = MySQLDialect_pyodbc
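# A minimal connection sketch (the DSN and credentials are placeholders and
# the pyodbc package must be installed); the dialect above is selected by the
# "mysql+pyodbc" URL scheme:
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine("mysql+pyodbc://scott:tiger@some_dsn")
    print(engine.dialect.name)  # -> "mysql"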
|
gpl-2.0
|
awifi-dev/android_kernel_lge_awifi
|
tools/perf/scripts/python/futex-contention.py
|
11261
|
1486
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
|
gpl-2.0
|
ThomasHabets/python-pyhsm
|
examples/yhsm-monitor-exit.py
|
1
|
1480
|
#!/usr/bin/env python
#
# Copyright (c) 2011, Yubico AB
# All rights reserved.
#
# Utility to send a MONITOR_EXIT command to a YubiHSM.
#
# MONITOR_EXIT only works if the YubiHSM is in debug mode. It would
# be a security problem to allow remote reconfiguration of a production
# YubiHSM.
#
# If your YubiHSM is not in debug mode, enter configuration mode by
# pressing the small button while inserting the YubiHSM in the USB port.
#
import sys
sys.path.append('Lib')
import pyhsm
device = "/dev/ttyACM0"
# simplified arguments parsing
d_argv = dict.fromkeys(sys.argv)
debug = d_argv.has_key('-v')
raw = d_argv.has_key('-R')
if d_argv.has_key('-h'):
sys.stderr.write("Syntax: %s [-v] [-R]\n" % (sys.argv[0]))
sys.stderr.write("\nOptions :\n")
sys.stderr.write(" -v verbose\n")
sys.stderr.write(" -R raw MONITOR_EXIT command\n")
sys.exit(0)
res = 0
try:
s = pyhsm.base.YHSM(device=device, debug = debug)
if raw:
# No initialization
s.write('\x7f\xef\xbe\xad\xba\x10\x41\x52\x45')
else:
print "Version: %s" % s.info()
s.monitor_exit()
print "Exited monitor-mode (maybe)"
if raw:
print "s.stick == %s" % s.stick
print "s.stick.ser == %s" % s.stick.ser
for _ in xrange(3):
s.stick.ser.write("\n")
line = s.stick.ser.readline()
print "%s" % (line)
except pyhsm.exception.YHSM_Error, e:
print "ERROR: %s" % e
res = 1
sys.exit(res)
|
bsd-2-clause
|
o3project/ryu-oe
|
ryu/controller/event.py
|
43
|
1110
|
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EventBase(object):
# Nothing yet
pass
class EventRequestBase(EventBase):
def __init__(self):
super(EventRequestBase, self).__init__()
        self.dst = None  # app.name of the app that provides the event.
self.src = None
self.sync = False
self.reply_q = None
class EventReplyBase(EventBase):
def __init__(self, dst):
super(EventReplyBase, self).__init__()
self.dst = dst
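# A minimal sketch of how applications typically pair these base classes (the
# event and attribute names below are illustrative, not part of Ryu's API):
class EventExampleRequest(EventRequestBase):
    def __init__(self, key):
        super(EventExampleRequest, self).__init__()
        self.key = key
class EventExampleReply(EventReplyBase):
    def __init__(self, dst, value=None):
        super(EventExampleReply, self).__init__(dst)
        self.value = value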
|
apache-2.0
|
omor1/linux-430
|
scripts/gdb/linux/modules.py
|
774
|
2718
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils
module_type = utils.CachedType("struct module")
def module_list():
global module_type
module_ptr_type = module_type.get_type().pointer()
modules = gdb.parse_and_eval("modules")
entry = modules['next']
end_of_list = modules.address
while entry != end_of_list:
yield utils.container_of(entry, module_ptr_type, "list")
entry = entry['next']
def find_module_by_name(name):
for module in module_list():
if module['name'].string() == name:
return module
return None
class LxModule(gdb.Function):
"""Find module by name and return the module variable.
$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
    of the target and return the module variable whose name matches MODULE."""
def __init__(self):
super(LxModule, self).__init__("lx_module")
def invoke(self, mod_name):
mod_name = mod_name.string()
module = find_module_by_name(mod_name)
if module:
return module.dereference()
else:
raise gdb.GdbError("Unable to find MODULE " + mod_name)
LxModule()
class LxLsmod(gdb.Command):
"""List currently loaded modules."""
_module_use_type = utils.CachedType("struct module_use")
def __init__(self):
super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
gdb.write(
"Address{0} Module Size Used by\n".format(
" " if utils.get_long_type().sizeof == 8 else ""))
for module in module_list():
gdb.write("{address} {name:<19} {size:>8} {ref}".format(
address=str(module['module_core']).split()[0],
name=module['name'].string(),
size=str(module['core_size']),
ref=str(module['refcnt']['counter'])))
source_list = module['source_list']
t = self._module_use_type.get_type().pointer()
entry = source_list['next']
first = True
while entry != source_list.address:
use = utils.container_of(entry, t, "source_list")
gdb.write("{separator}{name}".format(
separator=" " if first else ",",
name=use['source']['name'].string()))
first = False
entry = entry['next']
gdb.write("\n")
LxLsmod()
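# Example gdb session using the helpers defined above (the module name "ext4"
# is illustrative):
#   (gdb) lx-lsmod
#   (gdb) p $lx_module("ext4")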
|
gpl-2.0
|
darvin/qtdjango
|
src/qtdjango/settings.py
|
1
|
5503
|
# -*- coding: utf-8 -*-
from qtdjango.helpers import test_connection
__author__ = 'darvin'
from qtdjango.connection import *
__author__ = 'darvin'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class BooleanEdit(QCheckBox):
def text(self):
return QVariant(self.checkState()).toString()
def setText(self, text):
self.setChecked(QVariant(text).toBool())
class SettingsDialog(QDialog):
widgets_table = [
# (name, caption, widget object, default value),
("address", u"Адрес сервера", QLineEdit, "http://127.0.0.1:8000"),
("api_path", u"Путь к api сервера", QLineEdit, "/api/"),
("server_package", u"Название пакета сервера", QLineEdit, "none"),
("login", u"Ваш логин", QLineEdit, ""),
("password", u"Ваш пароль", QLineEdit, ""),
("open_links_in_external_browser", \
u"Открывать ссылки из окна информации во внешнем браузере", BooleanEdit, True),
]
def __init__(self, parent=None, error_message=None, models_manager=None):
super(SettingsDialog, self).__init__(parent)
self.setWindowTitle(u"Настройки")
self.setModal(True)
self.formlayout = QFormLayout()
self.models_manager = models_manager
self.settings = QSettings()
self.message_widget = QLabel()
self.__widgets = []
for name, caption, widget_class, default in self.widgets_table:
self.__widgets.append((name, caption, widget_class(), default))
for name, caption, widget, default in self.__widgets:
self.formlayout.addRow(caption, widget)
widget.setText(self.settings.value(name, default).toString())
self.formlayout.addRow(self.message_widget)
if error_message is not None:
self.message(**error_message)
buttonBox = QDialogButtonBox(QDialogButtonBox.Save\
| QDialogButtonBox.Cancel |QDialogButtonBox.RestoreDefaults)
testButton = QPushButton(u"Тестировать соединение")
buttonBox.addButton(testButton, QDialogButtonBox.ActionRole)
testButton.clicked.connect(self.test)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
buttonBox.button(QDialogButtonBox.RestoreDefaults).clicked.connect(self.restore)
self.formlayout.addRow(buttonBox)
self.setLayout(self.formlayout)
def accept(self):
if self.test():
for name, caption, widget, default in self.__widgets:
self.settings.setValue(name, widget.text())
self.models_manager.set_connection_params(\
self.get_value("address"), \
self.get_value("api_path"), \
self.get_value("login"),\
self.get_value("password"))
QDialog.accept(self)
def restore(self):
for name, caption, widget, default in self.__widgets:
widget.setText(default)
def message(self, text, error=False, works=False, fields=[]):
self.message_widget.setText(text)
if error:
color = "red"
elif works:
color = "green"
else:
color = "black"
css = "QLabel { color : %s; }" % color
self.message_widget.setStyleSheet(css)
for name, caption, widget, default in self.__widgets:
self.formlayout.labelForField(widget).setStyleSheet("")
if name in fields:
self.formlayout.labelForField(widget).setStyleSheet(css)
def get_value(self, name):
return unicode(self.settings.value(name).toString())
def test(self):
s = {}
for name, caption, widget, default in self.__widgets:
s[name] = unicode(widget.text())
try:
remote_version = test_connection(s["address"],s["api_path"],s["login"],s["password"])
import qtdjango
if qtdjango.__version__==remote_version:
self.message(text=u"Удаленный сервер настроен правильно!", works=True)
return True
elif remote_version is not None:
self.message(u"Версия системы на удаленном сервере отличается от\
версии системы на клиенте")
return True
except SocketError:
self.message(text=u"Ошибка при подключении к удаленному серверу", error=True, fields=\
("address",))
except ServerNotFoundError:
self.message(text=u"Удаленный сервер недоступен", error=True, fields=\
("address",))
except NotQtDjangoResponceError:
self.message(text=u"Не правильно настроен путь на удаленном сервере или \
удаленный сервер не является сервером системы", error=True, fields=\
("address","api_path"))
except AuthError:
self.message(text=u"Неверное имя пользователя или пароль", error=True, fields=\
("login","password"))
return False
|
gpl-2.0
|
person142/scipy
|
scipy/interpolate/tests/test_gil.py
|
9
|
1882
|
import itertools
import threading
import time
import numpy as np
from numpy.testing import assert_equal
import pytest
import scipy.interpolate
class TestGIL(object):
"""Check if the GIL is properly released by scipy.interpolate functions."""
def setup_method(self):
self.messages = []
def log(self, message):
self.messages.append(message)
def make_worker_thread(self, target, args):
log = self.log
class WorkerThread(threading.Thread):
def run(self):
log('interpolation started')
target(*args)
log('interpolation complete')
return WorkerThread()
@pytest.mark.slow
@pytest.mark.xfail(reason='race conditions, may depend on system load')
def test_rectbivariatespline(self):
def generate_params(n_points):
x = y = np.linspace(0, 1000, n_points)
x_grid, y_grid = np.meshgrid(x, y)
z = x_grid * y_grid
return x, y, z
def calibrate_delay(requested_time):
for n_points in itertools.count(5000, 1000):
args = generate_params(n_points)
time_started = time.time()
interpolate(*args)
if time.time() - time_started > requested_time:
return args
def interpolate(x, y, z):
scipy.interpolate.RectBivariateSpline(x, y, z)
args = calibrate_delay(requested_time=3)
worker_thread = self.make_worker_thread(interpolate, args)
worker_thread.start()
for i in range(3):
time.sleep(0.5)
self.log('working')
worker_thread.join()
assert_equal(self.messages, [
'interpolation started',
'working',
'working',
'working',
'interpolation complete',
])
|
bsd-3-clause
|
Awesomeomics/webserver
|
env/lib/python2.7/site-packages/jinja2/debug.py
|
620
|
10980
|
# -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType
from jinja2.utils import missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
from jinja2._compat import iteritems, reraise, code_type
# on pypy we can take advantage of transparent proxies
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
# what does the raise helper look like?
try:
exec("raise TypeError, 'foo'")
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
self._tb_next = None
@property
def tb_next(self):
return self._tb_next
def set_next(self, next):
if tb_set_next is not None:
try:
tb_set_next(self.tb, next and next.tb or None)
except Exception:
# this function can fail due to all the hackery it does
# on various python implementations. We just catch errors
# down and ignore them if necessary.
pass
self._tb_next = next
@property
def is_jinja_frame(self):
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
def make_frame_proxy(frame):
proxy = TracebackFrameProxy(frame)
if tproxy is None:
return proxy
def operation_handler(operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
return getattr(proxy, args[0])
elif operation == '__setattr__':
proxy.__setattr__(*args, **kwargs)
else:
return getattr(proxy, operation)(*args, **kwargs)
return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for printing or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
# newly concatenate the frames (which are proxies)
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.set_next(tb)
prev_tb = tb
prev_tb.set_next(None)
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
real_locals = tb.tb_frame.f_locals.copy()
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all()
else:
locals = {}
for name, value in iteritems(real_locals):
if name.startswith('l_') and value is not missing:
locals[name[2:]] = value
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
    # assemble the fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
code = code_type(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except:
pass
# execute the code and catch the new traceback
try:
exec(code, globals, locals)
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
    any python traceback object. Do not attempt to use this on non-CPython
    interpreters.
"""
import ctypes
from types import TracebackType
    # figure out the size of _Py_ssize_t
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if hasattr(sys, 'getobjects'):
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
tb_set_next = None
if tproxy is None:
try:
tb_set_next = _init_ugly_crap()
except:
pass
del _init_ugly_crap
|
mit
|
kiyoto/statsmodels
|
statsmodels/api.py
|
2
|
1484
|
from . import iolib
from . import datasets
from . import tools
from .tools.tools import add_constant, categorical
from . import regression
from .regression.linear_model import OLS, GLS, WLS, GLSAR
from .regression.quantile_regression import QuantReg
from .regression.mixed_linear_model import MixedLM
from .genmod import api as genmod
from .genmod.api import GLM, GEE, OrdinalGEE, NominalGEE, families, cov_struct
from . import robust
from .robust.robust_linear_model import RLM
from .discrete.discrete_model import (Poisson, Logit, Probit,
MNLogit, NegativeBinomial)
from .tsa import api as tsa
from .duration.hazard_regression import PHReg
from .nonparametric import api as nonparametric
from . import distributions
from .__init__ import test
from . import version
from .info import __doc__
from .graphics.gofplots import qqplot, qqplot_2samples, qqline, ProbPlot
from .graphics import api as graphics
from .stats import api as stats
from .emplike import api as emplike
from .duration import api as duration
from .formula import api as formula
from .iolib.smpickle import load_pickle as load
from .tools.pca import PCA
from .tools.print_version import show_versions
from .tools.web import webdoc
import os
chmpath = os.path.join(os.path.dirname(__file__), 'statsmodelsdoc.chm')
if os.path.exists(chmpath):
def open_help(chmpath=chmpath):
from subprocess import Popen
p = Popen(chmpath, shell=True)
del os
del chmpath
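# A minimal usage sketch with synthetic data (the values are illustrative
# only); it relies solely on names re-exported above:
if __name__ == '__main__':
    import numpy as np
    x = np.linspace(0, 10, 50)
    y = 2.0 + 0.5 * x + np.random.normal(scale=0.1, size=50)
    results = OLS(y, add_constant(x)).fit()
    print(results.params)  # approximately [2.0, 0.5]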
|
bsd-3-clause
|
rodriguezdevera/sakai
|
reference/library/src/webapp/editor/FCKeditor/editor/filemanager/connectors/py/fckconnector.py
|
131
|
2686
|
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase( object ):
"The base connector class. Subclass it to extend functionality (see Zope example)"
def __init__(self, environ=None):
"Constructor: Here you should parse request fields, initialize variables, etc."
self.request = FCKeditorRequest(environ) # Parse request
self.headers = [] # Clean Headers
if environ:
self.environ = environ
else:
self.environ = os.environ
# local functions
def setHeader(self, key, value):
self.headers.append ((key, value))
return
class FCKeditorRequest(object):
"A wrapper around the request object"
def __init__(self, environ):
if environ: # WSGI
self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
environ=environ,
keep_blank_values=1)
self.environ = environ
else: # plain old cgi
self.environ = os.environ
self.request = cgi.FieldStorage()
if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
if self.environ['REQUEST_METHOD'].upper()=='POST':
# we are in a POST, but GET query_string exists
# cgi parses by default POST data, so parse GET QUERY_STRING too
self.get_request = cgi.FieldStorage(fp=None,
environ={
'REQUEST_METHOD':'GET',
'QUERY_STRING':self.environ['QUERY_STRING'],
},
)
else:
self.get_request={}
def has_key(self, key):
return self.request.has_key(key) or self.get_request.has_key(key)
def get(self, key, default=None):
if key in self.request.keys():
field = self.request[key]
elif key in self.get_request.keys():
field = self.get_request[key]
else:
return default
if hasattr(field,"filename") and field.filename: #file upload, do not convert return value
return field
else:
return field.value
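# Hypothetical subclass sketch (the class and method names are illustrative),
# following the base class docstring's suggestion to subclass the connector:
#
#     class MyConnector(FCKeditorConnectorBase):
#         def doResponse(self):
#             self.setHeader('Content-Type', 'text/html')
#             ...  # build and return the response body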
|
apache-2.0
|
bonitadecker77/python-for-android
|
python-modules/twisted/twisted/trial/test/test_output.py
|
61
|
5167
|
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the output generated by trial.
"""
import os, StringIO
from twisted.scripts import trial
from twisted.trial import runner
from twisted.trial.test import packages
def runTrial(*args):
from twisted.trial import reporter
config = trial.Options()
config.parseOptions(args)
output = StringIO.StringIO()
myRunner = runner.TrialRunner(
reporter.VerboseTextReporter,
stream=output,
workingDirectory=config['temp-directory'])
suite = trial._getSuite(config)
result = myRunner.run(suite)
return output.getvalue()
class TestImportErrors(packages.SysPathManglingTest):
"""Actually run trial as if on the command line and check that the output
is what we expect.
"""
debug = False
parent = "_testImportErrors"
def runTrial(self, *args):
return runTrial('--temp-directory', self.mktemp(), *args)
def _print(self, stuff):
print stuff
return stuff
def failUnlessIn(self, container, containee, *args, **kwargs):
# redefined to be useful in callbacks
super(TestImportErrors, self).failUnlessIn(
containee, container, *args, **kwargs)
return container
def failIfIn(self, container, containee, *args, **kwargs):
# redefined to be useful in callbacks
super(TestImportErrors, self).failIfIn(
containee, container, *args, **kwargs)
return container
def test_trialRun(self):
self.runTrial()
def test_nonexistentModule(self):
d = self.runTrial('twisted.doesntexist')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'twisted.doesntexist')
return d
def test_nonexistentPackage(self):
d = self.runTrial('doesntexist')
self.failUnlessIn(d, 'doesntexist')
self.failUnlessIn(d, 'ModuleNotFound')
self.failUnlessIn(d, '[ERROR]')
return d
def test_nonexistentPackageWithModule(self):
d = self.runTrial('doesntexist.barney')
self.failUnlessIn(d, 'doesntexist.barney')
self.failUnlessIn(d, 'ObjectNotFound')
self.failUnlessIn(d, '[ERROR]')
return d
def test_badpackage(self):
d = self.runTrial('badpackage')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'badpackage')
self.failIfIn(d, 'IOError')
return d
def test_moduleInBadpackage(self):
d = self.runTrial('badpackage.test_module')
self.failUnlessIn(d, "[ERROR]")
self.failUnlessIn(d, "badpackage.test_module")
self.failIfIn(d, 'IOError')
return d
def test_badmodule(self):
d = self.runTrial('package.test_bad_module')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'package.test_bad_module')
self.failIfIn(d, 'IOError')
self.failIfIn(d, '<module ')
return d
def test_badimport(self):
d = self.runTrial('package.test_import_module')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'package.test_import_module')
self.failIfIn(d, 'IOError')
self.failIfIn(d, '<module ')
return d
def test_recurseImport(self):
d = self.runTrial('package')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'test_bad_module')
self.failUnlessIn(d, 'test_import_module')
self.failIfIn(d, '<module ')
self.failIfIn(d, 'IOError')
return d
def test_recurseImportErrors(self):
d = self.runTrial('package2')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'package2')
self.failUnlessIn(d, 'test_module')
self.failUnlessIn(d, "No module named frotz")
self.failIfIn(d, '<module ')
self.failIfIn(d, 'IOError')
return d
def test_nonRecurseImportErrors(self):
d = self.runTrial('-N', 'package2')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, "No module named frotz")
self.failIfIn(d, '<module ')
return d
def test_regularRun(self):
d = self.runTrial('package.test_module')
self.failIfIn(d, '[ERROR]')
self.failIfIn(d, 'IOError')
self.failUnlessIn(d, 'OK')
self.failUnlessIn(d, 'PASSED (successes=1)')
return d
def test_filename(self):
self.mangleSysPath(self.oldPath)
d = self.runTrial(
os.path.join(self.parent, 'package', 'test_module.py'))
self.failIfIn(d, '[ERROR]')
self.failIfIn(d, 'IOError')
self.failUnlessIn(d, 'OK')
self.failUnlessIn(d, 'PASSED (successes=1)')
return d
def test_dosFile(self):
## XXX -- not really an output test, more of a script test
self.mangleSysPath(self.oldPath)
d = self.runTrial(
os.path.join(self.parent,
'package', 'test_dos_module.py'))
self.failIfIn(d, '[ERROR]')
self.failIfIn(d, 'IOError')
self.failUnlessIn(d, 'OK')
self.failUnlessIn(d, 'PASSED (successes=1)')
return d
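# --- A minimal sketch (not part of the original test module) --------------------------
# Drives trial directly through the runTrial() helper above, just as the
# TestImportErrors cases do, and checks the rendered output for the error marker.
# The temp directory name is an arbitrary choice; requires a Python 2 + Twisted setup.
if __name__ == '__main__':
    out = runTrial('--temp-directory', '_trial_temp', 'twisted.doesntexist')
    print '[ERROR]' in out
    print 'twisted.doesntexist' in out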
|
apache-2.0
|
barbarahui/nuxeo-calisphere
|
s3stash/nxstash_mediajson.py
|
1
|
4444
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
from s3stash.nxstashref import NuxeoStashRef
from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo
from deepharvest.mediajson import MediaJson
from dplaingestion.mappers.ucldc_nuxeo_mapper import UCLDCNuxeoMapper
import json
import s3stash.s3tools
FILENAME_FORMAT = "{}-media.json"
class NuxeoStashMediaJson(NuxeoStashRef):
''' create and stash media.json file for a nuxeo object '''
def __init__(self,
path,
bucket,
region,
pynuxrc='~/.pynuxrc',
replace=True,
**kwargs):
super(NuxeoStashMediaJson, self).__init__(path, bucket, region,
pynuxrc, replace, **kwargs)
self.dh = DeepHarvestNuxeo(
self.path, self.bucket, pynuxrc=self.pynuxrc)
self.mj = MediaJson()
self.filename = FILENAME_FORMAT.format(self.uid)
self.filepath = os.path.join(self.tmp_dir, self.filename)
self._update_report('filename', self.filename)
self._update_report('filepath', self.filepath)
def nxstashref(self):
return self.nxstash_mediajson()
def nxstash_mediajson(self):
''' create media.json file for object and stash on s3 '''
self._update_report('stashed', False)
# extract and transform metadata for parent obj and any components
parent_md = self._get_parent_metadata(self.metadata)
component_md = [
self._get_component_metadata(c)
for c in self.dh.fetch_components(self.metadata)
]
# create media.json file
media_json = self.mj.create_media_json(parent_md, component_md)
self._write_file(media_json, self.filepath)
# stash media.json file on s3
stashed, s3_report = s3stash.s3tools.s3stash(
self.filepath, self.bucket, self.filename, self.region,
'application/json', self.replace)
self._update_report('s3_stash', s3_report)
self._update_report('stashed', stashed)
self._remove_tmp()
return self.report
def _get_parent_metadata(self, obj):
''' assemble top-level (parent) object metadata '''
metadata = {}
metadata['label'] = obj['title']
# only provide id, href, format if Nuxeo Document has file attached
full_metadata = self.nx.get_metadata(uid=obj['uid'])
if self.dh.has_file(full_metadata):
metadata['id'] = obj['uid']
metadata['href'] = self.dh.get_object_download_url(full_metadata)
metadata['format'] = self.dh.get_calisphere_object_type(obj[
'type'])
if metadata['format'] == 'video':
metadata['dimensions'] = self.dh.get_video_dimensions(
full_metadata)
return metadata
def _get_component_metadata(self, obj):
''' assemble component object metadata '''
metadata = {}
full_metadata = self.nx.get_metadata(uid=obj['uid'])
metadata['label'] = obj['title']
metadata['id'] = obj['uid']
metadata['href'] = self.dh.get_object_download_url(full_metadata)
# extract additional ucldc metadata from 'properties' element
ucldc_md = self._get_ucldc_schema_properties(full_metadata)
for key, value in ucldc_md.iteritems():
metadata[key] = value
# map 'type'
metadata['format'] = self.dh.get_calisphere_object_type(obj['type'])
return metadata
def _get_ucldc_schema_properties(self, metadata):
''' get additional metadata as mapped by harvester '''
properties = {}
mapper = UCLDCNuxeoMapper(metadata)
mapper.map_original_record()
mapper.map_source_resource()
properties = mapper.mapped_data['sourceResource']
properties.update(mapper.mapped_data['originalRecord'])
return properties
def _write_file(self, content_dict, filepath):
""" convert dict to json and write to file """
content_json = json.dumps(
content_dict, indent=4, separators=(',', ': '), sort_keys=False)
with open(filepath, 'wb') as f:
f.write(content_json)
f.flush()
def main(argv=None):
pass
if __name__ == "__main__":
sys.exit(main())
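# --- A minimal usage sketch (not part of the original module) --------------------------
# The path, bucket and region below are placeholders; actually stashing requires valid
# Nuxeo credentials in ~/.pynuxrc and AWS access, so this stays commented out.
#
#   stasher = NuxeoStashMediaJson('/asset-library/UCX/some-object', 'my-bucket',
#                                 'us-west-2', pynuxrc='~/.pynuxrc', replace=True)
#   report = stasher.nxstash_mediajson()
#   print(report['stashed'])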
|
bsd-3-clause
|
jhseu/tensorflow
|
tensorflow/python/keras/callbacks_v1.py
|
9
|
19005
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import profiler
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=['keras.callbacks.TensorBoard'])
class TensorBoard(callbacks.Callback):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network for histograms
computation.
write_images: whether to write model weights to visualize as image in
TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding layers
will be saved. If set to 0, embeddings won't be computed. Data to be
visualized in TensorBoard's Embedding tab must be passed as
`embeddings_data`.
      embeddings_layer_names: a list of names of layers to keep an eye on. If
        None or an empty list, all the embedding layers will be watched.
      embeddings_metadata: a dictionary which maps layer name to a file name in
        which metadata for this embedding layer is saved. See the
        [details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
        about the metadata file format. If the same metadata file is
        used for all embedding layers, a single string can be passed.
embeddings_data: data to be embedded at layers specified in
`embeddings_layer_names`. Numpy array (if the model has a single input)
or list of Numpy arrays (if the model has multiple inputs). Learn [more
about
embeddings](https://www.tensorflow.org/programmers_guide/embedding)
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
@compatibility(eager)
Using the `TensorBoard` callback will work when eager execution is enabled,
with the restriction that outputting histogram summaries of weights and
gradients is not supported. Consequently, `histogram_freq` will be ignored.
@end_compatibility
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch',
profile_batch=2):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
if self.histogram_freq and context.executing_eagerly():
logging.warning(
          UserWarning('Weight and gradient histograms not supported for eager '
                      'execution, setting `histogram_freq` to `0`.'))
self.histogram_freq = 0
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.batch_size = batch_size
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata
self.embeddings_data = embeddings_data
if update_freq == 'batch':
self.update_freq = 1
else:
self.update_freq = update_freq
self._samples_seen = 0
self._samples_seen_at_last_write = 0
# TODO(fishx): Add a link to the full profiler tutorial.
self._profile_batch = profile_batch
# One profiler session is running if it is True.
self._is_profiling = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _init_writer(self, model):
"""Sets file writer."""
if context.executing_eagerly():
self.writer = summary_ops_v2.create_file_writer(self.log_dir)
if not model.run_eagerly and self.write_graph:
with self.writer.as_default():
summary_ops_v2.graph(K.get_graph(), step=0)
elif self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())
else:
self.writer = tf_summary.FileWriter(self.log_dir)
def _make_histogram_ops(self, model):
"""Defines histogram ops when histogram_freq > 0."""
# only make histogram summary op if it hasn't already been made
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(':', '_')
tf_summary.histogram(mapped_weight_name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img,
[shape[0], shape[1], shape[2], 1])
elif len(shape) == 1: # bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf_summary.image(mapped_weight_name, w_img)
if self.write_grads:
for weight in layer.trainable_weights:
mapped_weight_name = weight.name.replace(':', '_')
grads = model.optimizer.get_gradients(model.total_loss, weight)
def is_indexed_slices(grad):
return type(grad).__name__ == 'IndexedSlices'
grads = [
grad.values if is_indexed_slices(grad) else grad
for grad in grads
]
tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
if hasattr(layer, 'output'):
if isinstance(layer.output, list):
for i, output in enumerate(layer.output):
tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
else:
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
def set_model(self, model):
"""Sets Keras model and creates summary ops."""
self.model = model
self._init_writer(model)
# histogram summaries only enabled in graph mode
if not context.executing_eagerly():
self._make_histogram_ops(model)
self.merged = tf_summary.merge_all()
# If both embedding_freq and embeddings_data are available, we will
# visualize embeddings.
if self.embeddings_freq and self.embeddings_data is not None:
# Avoid circular dependency.
from tensorflow.python.keras.engine import training_utils # pylint: disable=g-import-not-at-top
self.embeddings_data = training_utils.standardize_input_data(
self.embeddings_data, model.input_names)
# If embedding_layer_names are not provided, get all of the embedding
# layers from the model.
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name
for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
self.assign_embeddings = []
embeddings_vars = {}
self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
self.step = step = array_ops.placeholder(dtypes.int32)
for layer in self.model.layers:
if layer.name in embeddings_layer_names:
embedding_input = self.model.get_layer(layer.name).output
embedding_size = np.prod(embedding_input.shape[1:])
embedding_input = array_ops.reshape(embedding_input,
(step, int(embedding_size)))
shape = (self.embeddings_data[0].shape[0], int(embedding_size))
embedding = variables.Variable(
array_ops.zeros(shape), name=layer.name + '_embedding')
embeddings_vars[layer.name] = embedding
batch = state_ops.assign(embedding[batch_id:batch_id + step],
embedding_input)
self.assign_embeddings.append(batch)
self.saver = saver.Saver(list(embeddings_vars.values()))
# Create embeddings_metadata dictionary
if isinstance(self.embeddings_metadata, str):
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings_vars.keys()
}
else:
# If embedding_metadata is already a dictionary
embeddings_metadata = self.embeddings_metadata
try:
from tensorboard.plugins import projector
except ImportError:
raise ImportError('Failed to import TensorBoard. Please make sure that '
                          'TensorBoard integration is complete.')
# TODO(psv): Add integration tests to test embedding visualization
# with TensorBoard callback. We are unable to write a unit test for this
# because TensorBoard dependency assumes TensorFlow package is installed.
config = projector.ProjectorConfig()
for layer_name, tensor in embeddings_vars.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if (embeddings_metadata is not None and
layer_name in embeddings_metadata):
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def _fetch_callback(self, summary):
self.writer.add_summary(summary, self._total_val_batches_seen)
self._total_val_batches_seen += 1
def _write_custom_summaries(self, step, logs=None):
"""Writes metrics out as custom scalar summaries.
Arguments:
step: the global step to use for TensorBoard.
logs: dict. Keys are scalar summary names, values are
NumPy scalars.
"""
logs = logs or {}
if context.executing_eagerly():
# use v2 summary ops
with self.writer.as_default(), summary_ops_v2.always_record_summaries():
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary_ops_v2.scalar(name, value, step=step)
else:
# use FileWriter from v1 summary
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, step)
self.writer.flush()
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
Performs profiling if current batch is in profiler_batches.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get('size', 1)
samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
batch_logs = {('batch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
self._write_custom_summaries(self._total_batches_seen, batch_logs)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
if self._is_profiling:
profiler.save(self.log_dir, profiler.stop())
self._is_profiling = False
elif (not self._is_profiling and
self._total_batches_seen == self._profile_batch - 1):
profiler.start()
self._is_profiling = True
def on_train_begin(self, logs=None):
if self._profile_batch == 1:
profiler.start()
self._is_profiling = True
def on_epoch_begin(self, epoch, logs=None):
"""Add histogram op to Model eval_function callbacks, reset batch count."""
# check if histogram summary should be run for this epoch
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._epoch = epoch
# pylint: disable=protected-access
# add the histogram summary op if it should run this epoch
self.model._make_test_function()
if self.merged not in self.model.test_function.fetches:
self.model.test_function.fetches.append(self.merged)
self.model.test_function.fetch_callbacks[
self.merged] = self._fetch_callback
# pylint: enable=protected-access
def on_epoch_end(self, epoch, logs=None):
"""Checks if summary ops should run next epoch, logs scalar summaries."""
# don't output batch_size and
# batch number as TensorBoard summaries
logs = {('epoch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
if self.update_freq == 'epoch':
step = epoch
else:
step = self._samples_seen
self._write_custom_summaries(step, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
# pylint: disable=protected-access
if self.merged in self.model.test_function.fetches:
self.model.test_function.fetches.remove(self.merged)
if self.merged in self.model.test_function.fetch_callbacks:
self.model.test_function.fetch_callbacks.pop(self.merged)
# pylint: enable=protected-access
if self.embeddings_data is None and self.embeddings_freq:
raise ValueError('To visualize embeddings, embeddings_data must '
'be provided.')
if self.embeddings_freq and self.embeddings_data is not None:
if epoch % self.embeddings_freq == 0:
# We need a second forward-pass here because we're passing
        # the `embeddings_data` explicitly. This design allows passing
        # arbitrary data as `embeddings_data` and results from the fact
# that we need to know the size of the `tf.Variable`s which
# hold the embeddings in `set_model`. At this point, however,
# the `validation_data` is not yet set.
embeddings_data = self.embeddings_data
n_samples = embeddings_data[0].shape[0]
i = 0
sess = K.get_session()
while i < n_samples:
step = min(self.batch_size, n_samples - i)
batch = slice(i, i + step)
if isinstance(self.model.input, list):
feed_dict = {
model_input: embeddings_data[idx][batch]
for idx, model_input in enumerate(self.model.input)
}
else:
feed_dict = {self.model.input: embeddings_data[0][batch]}
feed_dict.update({self.batch_id: i, self.step: step})
if not isinstance(K.learning_phase(), int):
feed_dict[K.learning_phase()] = False
sess.run(self.assign_embeddings, feed_dict=feed_dict)
self.saver.save(sess,
os.path.join(self.log_dir, 'keras_embedding.ckpt'),
epoch)
i += self.batch_size
def on_train_end(self, logs=None):
if self._is_profiling:
profiler.save(self.log_dir, profiler.stop())
self._is_profiling = False
self.writer.close()
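# --- A minimal usage sketch (not part of this module) --------------------------------
# Wires the v1 TensorBoard callback documented above into Model.fit() on toy data.
# The layer sizes, log directory and random training data are illustrative assumptions;
# profiling is disabled (profile_batch=0) and write_graph=False keeps the run cheap.
if __name__ == '__main__':
  from tensorflow.python.keras import layers, models
  toy_x = np.random.rand(32, 4).astype('float32')
  toy_y = np.random.rand(32, 1).astype('float32')
  toy_model = models.Sequential([layers.Dense(1, input_shape=(4,))])
  toy_model.compile(optimizer='sgd', loss='mse')
  tb = TensorBoard(log_dir='./logs', write_graph=False, profile_batch=0)
  toy_model.fit(toy_x, toy_y, epochs=2, callbacks=[tb], verbose=0)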
|
apache-2.0
|
huonw/servo
|
tests/wpt/update/updatecommandline.py
|
210
|
1736
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
def create_parser():
from wptrunner import wptcommandline
parser = wptcommandline.create_parser_update()
parser.add_argument("--upstream", dest="upstream", action="store_true", default=None,
help="Push local changes to upstream repository even when not syncing")
parser.add_argument("--no-upstream", dest="upstream", action="store_false", default=None,
help="Dont't push local changes to upstream repository when syncing")
parser.add_argument("--token-file", action="store", type=wptcommandline.abs_path,
help="Path to file containing github token")
parser.add_argument("--token", action="store", help="GitHub token to use")
return parser
def check_args(kwargs):
from wptrunner import wptcommandline
wptcommandline.set_from_config(kwargs)
kwargs["upstream"] = kwargs["upstream"] if kwargs["upstream"] is not None else kwargs["sync"]
if kwargs["upstream"]:
if kwargs["rev"]:
raise ValueError("Setting --rev with --upstream isn't supported")
if kwargs["token"] is None:
if kwargs["token_file"] is None:
raise ValueError("Must supply either a token file or a token")
with open(kwargs["token_file"]) as f:
token = f.read().strip()
kwargs["token"] = token
del kwargs["token_file"]
return kwargs
def parse_args():
parser = create_parser()
kwargs = vars(parser.parse_args())
return check_args(kwargs)
|
mpl-2.0
|
40023256/2015cdag1man
|
static/Brython3.1.1-20150328-091302/Lib/genericpath.py
|
727
|
3093
|
"""
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
os.stat(path)
except os.error:
return False
return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except os.error:
return False
return stat.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return False
return stat.S_ISDIR(st.st_mode)
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the metadata change time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
# NOTE: This code must work for text and bytes strings.
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex:filenameIndex+1] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, p[:0]
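# --- A minimal sketch (not part of the stdlib module; real code should use os.path) ----
# Demonstrates commonprefix() and the generic _splitext() helper; '/' and '.' stand in
# for the OS-specific separators that posixpath/ntpath normally pass in.
if __name__ == "__main__":
    print(commonprefix(['/usr/lib/python', '/usr/local/lib']))    # '/usr/l' (character-wise)
    print(_splitext('archive.tar.gz', '/', None, '.'))            # ('archive.tar', '.gz')
    print(_splitext('/tmp/.hidden', '/', None, '.'))              # leading dot is not an extension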
|
gpl-3.0
|
vikitripathi/MB-MessApp-API
|
messApp/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py
|
330
|
3364
|
# urllib3/exceptions.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class ConnectionError(HTTPError):
"Raised when a normal connection fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
## Leaf Exceptions
class MaxRetryError(RequestError):
"Raised when the maximum number of retries is exceeded."
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s" % url
if reason:
message += " (Caused by %s: %s)" % (type(reason), reason)
else:
message += " (Caused by redirect)"
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationParseError(ValueError, HTTPError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
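# --- A minimal sketch (not part of urllib3 itself) ------------------------------------
# Illustrates how the hierarchy above is meant to be caught: TimeoutError covers both
# read and connect timeouts, and MaxRetryError carries the offending URL plus the
# underlying reason. The pool argument is None here purely for illustration.
if __name__ == "__main__":
    try:
        raise ReadTimeoutError(None, "/slow", "Read timed out.")
    except TimeoutError as e:          # would also catch ConnectTimeoutError
        print("timeout: %s" % e)
    try:
        raise MaxRetryError(None, "/flaky", reason=SSLError("certificate verify failed"))
    except MaxRetryError as e:
        print("gave up on %s (%s)" % (e.url, e.reason))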
|
apache-2.0
|
thierry1985/project-1022
|
translate/pddl/pddl_types.py
|
3
|
2620
|
# Renamed from types.py to avoid clash with stdlib module.
# In the future, use explicitly relative imports or absolute
# imports as a better solution.
import conditions
import graph
import itertools
class Type(object):
def __init__(self, name, basetype_name=None):
self.name = name
self.basetype_name = basetype_name
def __str__(self):
return self.name
def __repr__(self):
return "Type(%s, %s)" % (self.name, self.basetype_name)
def set_supertypes(type_list):
typename_to_type = {}
child_types = []
for type in type_list:
type.supertype_names = []
typename_to_type[type.name] = type
if type.basetype_name:
child_types.append((type.name, type.basetype_name))
for (desc_name, anc_name) in graph.transitive_closure(child_types):
typename_to_type[desc_name].supertype_names.append(anc_name)
class TypedObject(object):
def __init__(self, name, type):
self.name = name
if isinstance(type,list):
self.type = tuple(type)
else:
self.type = type
def __hash__(self):
return hash((self.name, self.type))
def __eq__(self, other):
return self.name == other.name and self.type == other.type
def __ne__(self, other):
return not self == other
def __str__(self):
return "%s: %s" % (self.name, self.type)
def uniquify_name(self, type_map, renamings):
if self.name not in type_map:
type_map[self.name] = self.type
return self
for counter in itertools.count(1):
new_name = self.name + str(counter)
if new_name not in type_map:
renamings[self.name] = new_name
type_map[new_name] = self.type
return TypedObject(new_name, self.type)
def to_untyped_strips(self):
return conditions.Atom(self.type, [self.name])
# types is used to add (either ...) statements as supertypes
def parse_typed_list(alist, only_variables=False, constructor=TypedObject, functions=False, types=[]):
result = []
while alist:
try:
separator_position = alist.index("-")
except ValueError:
items = alist
if functions:
type = "number"
else:
type = "object"
alist = []
else:
items = alist[:separator_position]
type = alist[separator_position + 1]
if isinstance(type,list):
assert type[0]=="either"
type = tuple(type)
for subtype in type[1:]:
types.append(Type(subtype,type))
alist = alist[separator_position + 2:]
for item in items:
assert not only_variables or item.startswith("?")
entry = constructor(item, type)
result.append(entry)
return result
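# --- A minimal sketch (not part of the original module) --------------------------------
# Shows how parse_typed_list() consumes a PDDL-style token list: items before a "-"
# separator get the type that follows it, and trailing items fall back to "object".
# Running it standalone assumes the sibling modules (conditions, graph) are importable.
if __name__ == "__main__":
    for obj in parse_typed_list(["?x", "?y", "-", "location", "?z"]):
        print obj          # -> "?x: location", "?y: location", "?z: object"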
|
mit
|
rd37/horizon
|
openstack_dashboard/dashboards/project/instances/urls.py
|
7
|
1957
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.project.instances import views
INSTANCES = r'^(?P<instance_id>[^/]+)/%s$'
INSTANCES_KEYPAIR = r'^(?P<instance_id>[^/]+)/(?P<keypair_name>[^/]+)/%s$'
VIEW_MOD = 'openstack_dashboard.dashboards.project.instances.views'
urlpatterns = patterns(VIEW_MOD,
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^launch$', views.LaunchInstanceView.as_view(), name='launch'),
url(r'^(?P<instance_id>[^/]+)/$',
views.DetailView.as_view(), name='detail'),
url(INSTANCES % 'update', views.UpdateView.as_view(), name='update'),
url(INSTANCES % 'rebuild', views.RebuildView.as_view(), name='rebuild'),
url(INSTANCES % 'console', 'console', name='console'),
url(INSTANCES % 'vnc', 'vnc', name='vnc'),
url(INSTANCES % 'spice', 'spice', name='spice'),
url(INSTANCES % 'rdp', 'rdp', name='rdp'),
url(INSTANCES % 'resize', views.ResizeView.as_view(), name='resize'),
url(INSTANCES_KEYPAIR % 'decryptpassword',
views.DecryptPasswordView.as_view(), name='decryptpassword'),
)
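# --- A minimal illustration (not part of the original module) --------------------------
# The INSTANCES templates above are expanded with an action name before being handed to
# url(); running this standalone assumes the Horizon/Django imports above resolve.
if __name__ == "__main__":
    print(INSTANCES % 'update')                   # ^(?P<instance_id>[^/]+)/update$
    print(INSTANCES_KEYPAIR % 'decryptpassword')  # adds the keypair_name capture group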
|
apache-2.0
|
isht3/zulip
|
zerver/views/user_settings.py
|
7
|
12505
|
from __future__ import absolute_import
from typing import Dict, Optional, Any
from typing import Text
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.auth import authenticate, update_session_auth_hash
from django.http import HttpRequest, HttpResponse
from zerver.decorator import authenticated_json_post_view, has_request_variables, REQ
from zerver.lib.actions import do_change_password, \
do_change_full_name, do_change_enable_desktop_notifications, \
do_change_enter_sends, do_change_enable_sounds, \
do_change_enable_offline_email_notifications, do_change_enable_digest_emails, \
do_change_enable_offline_push_notifications, do_change_enable_online_push_notifications, \
do_change_default_desktop_notifications, do_change_autoscroll_forever, \
do_change_enable_stream_desktop_notifications, do_change_enable_stream_sounds, \
do_regenerate_api_key, do_change_avatar_source, do_change_twenty_four_hour_time, \
do_change_left_side_userlist, do_change_default_language, \
do_change_pm_content_in_desktop_notifications
from zerver.lib.avatar import avatar_url
from zerver.lib.i18n import get_available_language_codes
from zerver.lib.response import json_success, json_error
from zerver.lib.upload import upload_avatar_image
from zerver.lib.validator import check_bool, check_string
from zerver.lib.request import JsonableError
from zerver.models import UserProfile, Realm, name_changes_disabled
@has_request_variables
def json_change_ui_settings(request, user_profile,
autoscroll_forever=REQ(validator=check_bool,
default=None),
default_desktop_notifications=REQ(validator=check_bool,
default=None)):
# type: (HttpRequest, UserProfile, Optional[bool], Optional[bool]) -> HttpResponse
result = {}
if autoscroll_forever is not None and \
user_profile.autoscroll_forever != autoscroll_forever:
do_change_autoscroll_forever(user_profile, autoscroll_forever)
result['autoscroll_forever'] = autoscroll_forever
if default_desktop_notifications is not None and \
user_profile.default_desktop_notifications != default_desktop_notifications:
do_change_default_desktop_notifications(user_profile, default_desktop_notifications)
result['default_desktop_notifications'] = default_desktop_notifications
return json_success(result)
@authenticated_json_post_view
@has_request_variables
def json_change_settings(request, user_profile,
full_name=REQ(default=""),
old_password=REQ(default=""),
new_password=REQ(default=""),
confirm_password=REQ(default="")):
# type: (HttpRequest, UserProfile, Text, Text, Text, Text) -> HttpResponse
if not (full_name or new_password):
return json_error(_("No new data supplied"))
if new_password != "" or confirm_password != "":
if new_password != confirm_password:
return json_error(_("New password must match confirmation password!"))
if not authenticate(username=user_profile.email, password=old_password):
return json_error(_("Wrong password!"))
do_change_password(user_profile, new_password)
        # In Django 1.10, a password change invalidates sessions; see
        # https://docs.djangoproject.com/en/1.10/topics/auth/default/#session-invalidation-on-password-change
        # for details. To avoid this logging the user out of their own
        # session (which would provide a confusing UX at best), we
        # update the session hash here.
update_session_auth_hash(request, user_profile)
        # We also save the session to the DB immediately to mitigate
        # race conditions. In theory, there is still a race condition,
        # and to avoid it completely we would have to take some kind of
        # mutex lock in `django.contrib.auth.get_user`, where the session
        # is verified. To make that lock work we would have to control
        # the AuthenticationMiddleware, which is currently controlled
        # by Django.
request.session.save()
result = {}
if user_profile.full_name != full_name and full_name.strip() != "":
if name_changes_disabled(user_profile.realm):
            # Failing silently is fine -- they can't do it through the UI, so
# they'd have to be trying to break the rules.
pass
else:
new_full_name = full_name.strip()
if len(new_full_name) > UserProfile.MAX_NAME_LENGTH:
return json_error(_("Name too long!"))
do_change_full_name(user_profile, new_full_name)
result['full_name'] = new_full_name
return json_success(result)
@has_request_variables
def update_display_settings_backend(request, user_profile,
twenty_four_hour_time=REQ(validator=check_bool, default=None),
default_language=REQ(validator=check_string, default=None),
left_side_userlist=REQ(validator=check_bool, default=None)):
# type: (HttpRequest, UserProfile, Optional[bool], Optional[str], Optional[bool]) -> HttpResponse
if (default_language is not None and
default_language not in get_available_language_codes()):
raise JsonableError(_("Invalid language '%s'" % (default_language,)))
result = {} # type: Dict[str, Any]
if (default_language is not None and
user_profile.default_language != default_language):
do_change_default_language(user_profile, default_language)
result['default_language'] = default_language
elif (twenty_four_hour_time is not None and
user_profile.twenty_four_hour_time != twenty_four_hour_time):
do_change_twenty_four_hour_time(user_profile, twenty_four_hour_time)
result['twenty_four_hour_time'] = twenty_four_hour_time
elif (left_side_userlist is not None and
user_profile.left_side_userlist != left_side_userlist):
do_change_left_side_userlist(user_profile, left_side_userlist)
result['left_side_userlist'] = left_side_userlist
return json_success(result)
@has_request_variables
def json_change_notify_settings(request, user_profile,
enable_stream_desktop_notifications=REQ(validator=check_bool,
default=None),
enable_stream_sounds=REQ(validator=check_bool,
default=None),
enable_desktop_notifications=REQ(validator=check_bool,
default=None),
enable_sounds=REQ(validator=check_bool,
default=None),
enable_offline_email_notifications=REQ(validator=check_bool,
default=None),
enable_offline_push_notifications=REQ(validator=check_bool,
default=None),
enable_online_push_notifications=REQ(validator=check_bool,
default=None),
enable_digest_emails=REQ(validator=check_bool,
default=None),
pm_content_in_desktop_notifications=REQ(validator=check_bool,
default=None)):
# type: (HttpRequest, UserProfile, Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool]) -> HttpResponse
result = {}
# Stream notification settings.
if enable_stream_desktop_notifications is not None and \
user_profile.enable_stream_desktop_notifications != enable_stream_desktop_notifications:
do_change_enable_stream_desktop_notifications(
user_profile, enable_stream_desktop_notifications)
result['enable_stream_desktop_notifications'] = enable_stream_desktop_notifications
if enable_stream_sounds is not None and \
user_profile.enable_stream_sounds != enable_stream_sounds:
do_change_enable_stream_sounds(user_profile, enable_stream_sounds)
result['enable_stream_sounds'] = enable_stream_sounds
# PM and @-mention settings.
if enable_desktop_notifications is not None and \
user_profile.enable_desktop_notifications != enable_desktop_notifications:
do_change_enable_desktop_notifications(user_profile, enable_desktop_notifications)
result['enable_desktop_notifications'] = enable_desktop_notifications
if enable_sounds is not None and \
user_profile.enable_sounds != enable_sounds:
do_change_enable_sounds(user_profile, enable_sounds)
result['enable_sounds'] = enable_sounds
if enable_offline_email_notifications is not None and \
user_profile.enable_offline_email_notifications != enable_offline_email_notifications:
do_change_enable_offline_email_notifications(user_profile, enable_offline_email_notifications)
result['enable_offline_email_notifications'] = enable_offline_email_notifications
if enable_offline_push_notifications is not None and \
user_profile.enable_offline_push_notifications != enable_offline_push_notifications:
do_change_enable_offline_push_notifications(user_profile, enable_offline_push_notifications)
result['enable_offline_push_notifications'] = enable_offline_push_notifications
if enable_online_push_notifications is not None and \
user_profile.enable_online_push_notifications != enable_online_push_notifications:
do_change_enable_online_push_notifications(user_profile, enable_online_push_notifications)
result['enable_online_push_notifications'] = enable_online_push_notifications
if enable_digest_emails is not None and \
user_profile.enable_digest_emails != enable_digest_emails:
do_change_enable_digest_emails(user_profile, enable_digest_emails)
result['enable_digest_emails'] = enable_digest_emails
if pm_content_in_desktop_notifications is not None and \
user_profile.pm_content_in_desktop_notifications != pm_content_in_desktop_notifications:
do_change_pm_content_in_desktop_notifications(user_profile, pm_content_in_desktop_notifications)
result['pm_content_in_desktop_notifications'] = pm_content_in_desktop_notifications
return json_success(result)
def set_avatar_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
if len(request.FILES) != 1:
return json_error(_("You must upload exactly one avatar."))
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, user_profile.email)
do_change_avatar_source(user_profile, UserProfile.AVATAR_FROM_USER)
user_avatar_url = avatar_url(user_profile)
json_result = dict(
avatar_url = user_avatar_url
)
return json_success(json_result)
def delete_avatar_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
do_change_avatar_source(user_profile, UserProfile.AVATAR_FROM_GRAVATAR)
gravatar_url = avatar_url(user_profile)
json_result = dict(
avatar_url = gravatar_url
)
return json_success(json_result)
@has_request_variables
def regenerate_api_key(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
do_regenerate_api_key(user_profile)
json_result = dict(
api_key = user_profile.api_key
)
return json_success(json_result)
@has_request_variables
def change_enter_sends(request, user_profile,
enter_sends=REQ(validator=check_bool)):
# type: (HttpRequest, UserProfile, bool) -> HttpResponse
do_change_enter_sends(user_profile, enter_sends)
return json_success()
|
apache-2.0
|
chrisglass/kubernetes
|
hack/lookup_pull.py
|
194
|
1290
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to print out PR info in release note format.
import json
import sys
import urllib2
PULLQUERY=("https://api.github.com/repos/"
"kubernetes/kubernetes/pulls/{pull}")
LOGIN="login"
TITLE="title"
USER="user"
def print_pulls(pulls):
for pull in pulls:
d = json.loads(urllib2.urlopen(PULLQUERY.format(pull=pull)).read())
print "* {title} #{pull} ({author})".format(
title=d[TITLE], pull=pull, author=d[USER][LOGIN])
if __name__ == "__main__":
if len(sys.argv) < 2:
print ("Usage: {cmd} <pulls>...: Prints out short " +
"markdown description for PRs appropriate for release notes.")
sys.exit(1)
print_pulls(sys.argv[1:])
|
apache-2.0
|
knehez/edx-platform
|
lms/djangoapps/teams/migrations/0001_initial.py
|
84
|
7593
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseTeam'
db.create_table('teams_courseteam', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
('topic_id', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, blank=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=300)),
('country', self.gf('django_countries.fields.CountryField')(max_length=2, blank=True)),
('language', self.gf('student.models.LanguageField')(max_length=16, blank=True)),
))
db.send_create_signal('teams', ['CourseTeam'])
# Adding model 'CourseTeamMembership'
db.create_table('teams_courseteammembership', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('team', self.gf('django.db.models.fields.related.ForeignKey')(related_name='membership', to=orm['teams.CourseTeam'])),
('date_joined', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('teams', ['CourseTeamMembership'])
# Adding unique constraint on 'CourseTeamMembership', fields ['user', 'team']
db.create_unique('teams_courseteammembership', ['user_id', 'team_id'])
def backwards(self, orm):
# Removing unique constraint on 'CourseTeamMembership', fields ['user', 'team']
db.delete_unique('teams_courseteammembership', ['user_id', 'team_id'])
# Deleting model 'CourseTeam'
db.delete_table('teams_courseteam')
# Deleting model 'CourseTeamMembership'
db.delete_table('teams_courseteammembership')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.courseteam': {
'Meta': {'object_name': 'CourseTeam'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('student.models.LanguageField', [], {'max_length': '16', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'team_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'topic_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.CourseTeamMembership']", 'to': "orm['auth.User']"})
},
'teams.courseteammembership': {
'Meta': {'unique_together': "(('user', 'team'),)", 'object_name': 'CourseTeamMembership'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'membership'", 'to': "orm['teams.CourseTeam']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['teams']
|
agpl-3.0
|
pastephens/pysal
|
pysal/spreg/ols_regimes.py
|
15
|
25402
|
"""
Ordinary Least Squares regression with regimes.
"""
__author__ = "Luc Anselin [email protected], Pedro V. Amaral [email protected], Daniel Arribas-Bel [email protected]"
import regimes as REGI
import user_output as USER
import multiprocessing as mp
from ols import BaseOLS
from utils import set_warn, spbroadcast, RegressionProps_basic, RegressionPropsY, spdot
from robust import hac_multi
import summary_output as SUMMARY
import numpy as np
from platform import system
import scipy.sparse as SP
class OLS_Regimes(BaseOLS, REGI.Regimes_Frame, RegressionPropsY):
"""
Ordinary least squares with results and diagnostics.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
w : pysal W object
Spatial weights object (required if running spatial
diagnostics)
robust : string
If 'white', then a White consistent estimator of the
variance-covariance matrix is given. If 'hac', then a
HAC consistent estimator of the variance-covariance
matrix is given. Default set to None.
gwk : pysal W object
Kernel spatial weights needed for HAC estimation. Note:
matrix must have ones along the main diagonal.
sig2n_k : boolean
If True, then use n-k to estimate sigma^2. If False, use n.
nonspat_diag : boolean
If True, then compute non-spatial diagnostics on
the regression.
spat_diag : boolean
If True, then compute Lagrange multiplier tests (requires
w). Note: see moran for further tests.
moran : boolean
If True, compute Moran's I on the residuals. Note:
requires spat_diag=True.
white_test : boolean
If True, compute White's specification robust test.
(requires nonspat_diag=True)
vm : boolean
If True, include variance-covariance matrix in summary
results
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
robust : string
Adjustment for robust standard errors
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
r2 : float
R squared
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
ar2 : float
Adjusted R squared
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
utu : float
Sum of squared residuals
sig2 : float
Sigma squared used in computations
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
sig2ML : float
Sigma squared (maximum likelihood)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
f_stat : tuple
Statistic (float), p-value (float)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
logll : float
Log likelihood
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
aic : float
Akaike information criterion
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
schwarz : float
Schwarz information criterion
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
std_err : array
1xk array of standard errors of the betas
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
t_stat : list of tuples
t statistic; each tuple contains the pair (statistic,
p-value), where each is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
mulColli : float
Multicollinearity condition number
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
jarque_bera : dictionary
'jb': Jarque-Bera statistic (float); 'pvalue': p-value
(float); 'df': degrees of freedom (int)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
breusch_pagan : dictionary
'bp': Breusch-Pagan statistic (float); 'pvalue': p-value
(float); 'df': degrees of freedom (int)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
koenker_bassett : dictionary
'kb': Koenker-Bassett statistic (float); 'pvalue':
p-value (float); 'df': degrees of freedom (int)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
white : dictionary
'wh': White statistic (float); 'pvalue': p-value (float);
'df': degrees of freedom (int)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
lm_error : tuple
Lagrange multiplier test for spatial error model; tuple
contains the pair (statistic, p-value), where each is a
float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
lm_lag : tuple
Lagrange multiplier test for spatial lag model; tuple
contains the pair (statistic, p-value), where each is a
float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
rlm_error : tuple
Robust lagrange multiplier test for spatial error model;
tuple contains the pair (statistic, p-value), where each
is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
rlm_lag : tuple
Robust lagrange multiplier test for spatial lag model;
tuple contains the pair (statistic, p-value), where each
is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
lm_sarma : tuple
Lagrange multiplier test for spatial SARMA model; tuple
contains the pair (statistic, p-value), where each is a
float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
moran_res : tuple
Moran's I for the residuals; tuple containing the triple
(Moran's I, standardized Moran's I, p-value)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
title : string
Name of the regression method used
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
sig2n : float
Sigma squared (computed with n in the denominator)
sig2n_k : float
Sigma squared (computed with n-k in the denominator)
xtx : float
X'X
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
xtxi : float
(X'X)^-1
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: ['one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
Examples
--------
>>> import numpy as np
>>> import pysal
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it
the dependent variable for the regression. Note that PySAL requires this to be
a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = db.by_col(y_var)
>>> y = np.array(y).reshape(len(y), 1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2',...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
We can now run the regression and then have a summary of the output
by typing: olsr.summary
Alternatively, we can just check the betas and standard errors of the
parameters:
>>> olsr = OLS_Regimes(y, x, regimes, nonspat_diag=False, name_y=y_var, name_x=['PS90','UE90'], name_regimes=r_var, name_ds='NAT')
>>> olsr.betas
array([[ 0.39642899],
[ 0.65583299],
[ 0.48703937],
[ 5.59835 ],
[ 1.16210453],
[ 0.53163886]])
>>> np.sqrt(olsr.vm.diagonal())
array([ 0.24816345, 0.09662678, 0.03628629, 0.46894564, 0.21667395,
0.05945651])
>>> olsr.cols2regi
'all'
"""
def __init__(self, y, x, regimes,
w=None, robust=None, gwk=None, sig2n_k=True,
nonspat_diag=True, spat_diag=False, moran=False, white_test=False,
vm=False, constant_regi='many', cols2regi='all',
regime_err_sep=True, cores=False,
name_y=None, name_x=None, name_regimes=None,
name_w=None, name_gwk=None, name_ds=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y)
USER.check_robust(robust, gwk)
USER.check_spat_diag(spat_diag, w)
self.name_x_r = USER.set_name_x(name_x, x)
self.constant_regi = constant_regi
self.cols2regi = cols2regi
self.name_w = USER.set_name_w(name_w, w)
self.name_gwk = USER.set_name_w(name_gwk, gwk)
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_regimes = USER.set_name_ds(name_regimes)
self.n = n
cols2regi = REGI.check_cols2regi(
constant_regi, cols2regi, x, add_cons=False)
self.regimes_set = REGI._get_regimes_set(regimes)
self.regimes = regimes
USER.check_regimes(self.regimes_set, self.n, x.shape[1])
if regime_err_sep == True and robust == 'hac':
set_warn(
self, "Error by regimes is incompatible with HAC estimation. Hence, error by regimes has been disabled for this model.")
regime_err_sep = False
self.regime_err_sep = regime_err_sep
if regime_err_sep == True and set(cols2regi) == set([True]) and constant_regi == 'many':
self.y = y
name_x = USER.set_name_x(name_x, x)
regi_ids = dict(
(r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set)
self._ols_regimes_multi(x, w, regi_ids, cores,
gwk, sig2n_k, robust, nonspat_diag, spat_diag, vm, name_x, moran, white_test)
else:
name_x = USER.set_name_x(name_x, x, constant=True)
x, self.name_x = REGI.Regimes_Frame.__init__(self, x,
regimes, constant_regi, cols2regi, name_x)
BaseOLS.__init__(
self, y=y, x=x, robust=robust, gwk=gwk, sig2n_k=sig2n_k)
if regime_err_sep == True and robust == None:
y2, x2 = REGI._get_weighted_var(
regimes, self.regimes_set, sig2n_k, self.u, y, x)
ols2 = BaseOLS(y=y2, x=x2, sig2n_k=sig2n_k)
RegressionProps_basic(self, betas=ols2.betas, vm=ols2.vm)
self.title = "ORDINARY LEAST SQUARES - REGIMES (Group-wise heteroskedasticity)"
nonspat_diag = None
set_warn(
self, "Residuals treated as homoskedastic for the purpose of diagnostics.")
else:
self.title = "ORDINARY LEAST SQUARES - REGIMES"
self.robust = USER.set_robust(robust)
self.chow = REGI.Chow(self)
SUMMARY.OLS(reg=self, vm=vm, w=w, nonspat_diag=nonspat_diag,
spat_diag=spat_diag, moran=moran, white_test=white_test, regimes=True)
def _ols_regimes_multi(self, x, w, regi_ids, cores,
gwk, sig2n_k, robust, nonspat_diag, spat_diag, vm, name_x, moran, white_test):
results_p = {}
"""
for r in self.regimes_set:
if system() == 'Windows':
is_win = True
results_p[r] = _work(*(self.y,x,w,regi_ids,r,robust,sig2n_k,self.name_ds,self.name_y,name_x,self.name_w,self.name_regimes))
else:
pool = mp.Pool(cores)
results_p[r] = pool.apply_async(_work,args=(self.y,x,w,regi_ids,r,robust,sig2n_k,self.name_ds,self.name_y,name_x,self.name_w,self.name_regimes))
is_win = False
"""
for r in self.regimes_set:
if cores:
pool = mp.Pool(None)
results_p[r] = pool.apply_async(_work, args=(
self.y, x, w, regi_ids, r, robust, sig2n_k, self.name_ds, self.name_y, name_x, self.name_w, self.name_regimes))
else:
results_p[r] = _work(*(self.y, x, w, regi_ids, r, robust, sig2n_k,
self.name_ds, self.name_y, name_x, self.name_w, self.name_regimes))
self.kryd = 0
self.kr = x.shape[1] + 1
self.kf = 0
self.nr = len(self.regimes_set)
self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
self.betas = np.zeros((self.nr * self.kr, 1), float)
self.u = np.zeros((self.n, 1), float)
self.predy = np.zeros((self.n, 1), float)
"""
if not is_win:
pool.close()
pool.join()
"""
if cores:
pool.close()
pool.join()
results = {}
self.name_y, self.name_x = [], []
counter = 0
for r in self.regimes_set:
"""
if is_win:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
"""
if not cores:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
self.vm[(counter * self.kr):((counter + 1) * self.kr),
(counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
self.betas[
(counter * self.kr):((counter + 1) * self.kr), ] = results[r].betas
self.u[regi_ids[r], ] = results[r].u
self.predy[regi_ids[r], ] = results[r].predy
self.name_y += results[r].name_y
self.name_x += results[r].name_x
counter += 1
self.multi = results
self.hac_var = x
if robust == 'hac':
hac_multi(self, gwk)
self.chow = REGI.Chow(self)
if spat_diag:
self._get_spat_diag_props(x, sig2n_k)
SUMMARY.OLS_multi(reg=self, multireg=self.multi, vm=vm, nonspat_diag=nonspat_diag,
spat_diag=spat_diag, moran=moran, white_test=white_test, regimes=True, w=w)
def _get_spat_diag_props(self, x, sig2n_k):
self.k = self.kr
self._cache = {}
x = np.hstack((np.ones((x.shape[0], 1)), x))
self.x = REGI.regimeX_setup(
x, self.regimes, [True] * x.shape[1], self.regimes_set)
self.xtx = spdot(self.x.T, self.x)
self.xtxi = np.linalg.inv(self.xtx)
def _work(y, x, w, regi_ids, r, robust, sig2n_k, name_ds, name_y, name_x, name_w, name_regimes):
y_r = y[regi_ids[r]]
x_r = x[regi_ids[r]]
x_constant = USER.check_constant(x_r)
if robust == 'hac':
robust = None
model = BaseOLS(y_r, x_constant, robust=robust, sig2n_k=sig2n_k)
model.title = "ORDINARY LEAST SQUARES ESTIMATION - REGIME %s" % r
model.robust = USER.set_robust(robust)
model.name_ds = name_ds
model.name_y = '%s_%s' % (str(r), name_y)
model.name_x = ['%s_%s' % (str(r), i) for i in name_x]
model.name_w = name_w
model.name_regimes = name_regimes
if w:
w_r, warn = REGI.w_regime(w, regi_ids[r], r, transform=True)
set_warn(model, warn)
model.w = w_r
return model
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
import numpy as np
import pysal
db = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
y_var = 'CRIME'
y = np.array([db.by_col(y_var)]).reshape(49, 1)
x_var = ['INC', 'HOVAL']
x = np.array([db.by_col(name) for name in x_var]).T
r_var = 'NSA'
regimes = db.by_col(r_var)
w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
w.transform = 'r'
olsr = OLS_Regimes(y, x, regimes, w=w, constant_regi='many', nonspat_diag=False, spat_diag=False, name_y=y_var, name_x=['INC', 'HOVAL'],
name_ds='columbus', name_regimes=r_var, name_w='columbus.gal', regime_err_sep=True, cols2regi=[True, True], sig2n_k=True, robust='white')
print olsr.summary
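# Hypothetical follow-up (not part of spreg): a pooled single-equation baseline fitted
# on the same Columbus variables, so its summary can be compared with the regime-wise
# results printed above. Keyword names follow the OLS docstring; treat this function
# purely as an illustrative sketch.
def _pooled_baseline_sketch():
    import numpy as np
    import pysal
    db = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
    y = np.array([db.by_col('CRIME')]).reshape(49, 1)
    x = np.array([db.by_col(name) for name in ['INC', 'HOVAL']]).T
    w = pysal.rook_from_shapefile(pysal.examples.get_path('columbus.shp'))
    w.transform = 'r'
    # Pooled OLS: one coefficient per variable instead of one per regime.
    return pysal.spreg.OLS(y, x, w=w, spat_diag=True, name_y='CRIME',
                           name_x=['INC', 'HOVAL'], name_ds='columbus')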
|
bsd-3-clause
|
lalitkumarj/NEXT-psych
|
gui/base/app_manager/PoolBasedTripletMDS/PoolBasedTripletMDS.py
|
1
|
9004
|
import csv, json, os, requests, sys
from wtforms import Form, FieldList, FloatField, FormField, TextField, IntegerField, SelectField, validators, RadioField
from jinja2 import Environment, FileSystemLoader
import copy
from base.models import Experiment
from flask import render_template
from base.settings import Config
from base.app_manager.app_resource_prototype import AppResourcePrototype
config = Config()
TEMPLATES_DIRECTORY = os.path.dirname(__file__)
loader = FileSystemLoader(TEMPLATES_DIRECTORY)
env = Environment(loader=loader)
class PoolBasedTripletMDS(AppResourcePrototype):
"""
PoolBased TripletMDS
Author: Lalit Jain
App resource for PoolBasedTripletMDS.
"""
def get_experiment_params(self, args=None):
"""
Return a form with specific params for the new experiment form.
"""
alg_list = ['RandomSampling', 'UncertaintySampling','CrowdKernel']
# Alg row: follow this post: http://stackoverflow.com/questions/11402627/how-to-get-a-build-a-form-with-repeated-elements-well
class AlgorithmDefinitionRowForm(Form):
alg_label = TextField('Algorithm Label')
alg_id = SelectField('Algorithm Id', choices=[(algorithm, algorithm) for algorithm in alg_list])
alg_proportion = FloatField('Algorithm Proportion')
class PoolBasedTripletMDSParamsForm(Form):
d = IntegerField('Dimension', validators=[validators.required()], default=2)
failure_probability = FloatField('Confidence Level', validators=[validators.required()], default=0.95)
algorithm_management = RadioField('Algorithm Management',
choices=[('fixed_proportions','Fixed Proportions'),
('pure_exploration','Pure Exploration'),
('explore_exploit','Explore Exploit')],
default='fixed_proportions')
participant_to_algorithm_management = RadioField('Participant to Algorithm Management',
choices=[('one_to_many','One-to-many'),
('one_to_one','One-to-one')],
default='one_to_many')
# process the experiment parameters
def process_experiment_params(self):
return True
# List field of the rows of algorithm labels and alg id's
alg_rows = FieldList(FormField(AlgorithmDefinitionRowForm))
template = env.get_template("new_experiment_params.html")
return template, PoolBasedTripletMDSParamsForm
def get_experiment_dashboard(self, args=None):
"""
Return template with embedded widgets/plots/data for the dashboard.
"""
template = env.get_template("experiment_dashboard.html")
html = render_template(template)
return html
def get_formatted_participant_data(self, current_experiment, args=None):
"""
Return formatted participant logs that are app specific.
"""
# Use frontend base local url
url = "http://"+config.NEXT_BACKEND_HOST+":"+config.NEXT_BACKEND_PORT+"/api/experiment/"+current_experiment.exp_uid+"/"+current_experiment.exp_key+"/participants"
# Make a request to next_backend for the responses
try:
response = requests.get(url)
response_dict = eval(response.text)
except (requests.HTTPError, requests.ConnectionError) as e:
print "excepted e", e
raise
print response_dict
# Parse them into a csv
# The rows are participant_id, timestamp, center, left, right, target winner, alg_label
participant_responses = []
participant_responses.append(",".join(["Participant ID", "Timestamp","Center", "Left", "Right", "Answer", "Alg Label"]))
for participant_id, response_list in response_dict['participant_responses'].iteritems():
for response in response_list:
line = [participant_id, response['timestamp_query_generated']]
targets = {}
# This index is not a backend index! It is just one of the target_indices
for index in response['target_indices']:
targets[index['label']] = index
# Check for the index winner in this response
# Shouldn't we check for target_winner?
if 'index_winner' in response.keys() and response["index_winner"] == index['index']:
target_winner = index
# Append the center, left, right targets
line.extend([targets['center']['target']['target_id'], targets['left']['target']['target_id'], targets['right']['target']['target_id']])
# Append the target winner
line.append(target_winner['target']['target_id'])
# Append the alg_label
line.append(response['alg_label'])
participant_responses.append(",".join(line))
return participant_responses
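    # Hypothetical convenience helper (not part of the original app resource): shows how
    # the comma-joined rows built above could be persisted as a CSV file on disk.
    def _write_participant_csv(self, rows, path):
        with open(path, "w") as out_file:
            out_file.write("\n".join(rows))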
def run_experiment(self, current_experiment, args=None):
"""
Run an initExp call on the frontend base level.
"""
# Set up request dictionary for api initExp call
initExp_dict = {}
initExp_dict['app_id'] = current_experiment.app_id
initExp_dict['site_id'] = config.SITE_ID
initExp_dict['site_key'] = config.SITE_KEY
initExp_dict['args'] = {}
# Set up args for api initExp call
initExp_dict['args']['instructions'] = current_experiment.instructions
initExp_dict['args']['debrief'] = current_experiment.debrief
initExp_dict['args']['d'] = current_experiment.params['d']
initExp_dict['args']['n'] = len(current_experiment.target_set.targets)
initExp_dict['args']['failure_probability'] = current_experiment.params['failure_probability']
initExp_dict['args']['participant_to_algorithm_management'] = current_experiment.params['participant_to_algorithm_management']
initExp_dict['args']['algorithm_management_settings'] = {}
initExp_dict['args']['algorithm_management_settings']['mode'] = current_experiment.params['algorithm_management']
initExp_dict['args']['algorithm_management_settings']['params'] = {}
initExp_dict['args']['algorithm_management_settings']['params']['proportions'] = []
initExp_dict['args']['alg_list'] = []
for alg in current_experiment.params['alg_rows']:
params_dict = copy.deepcopy(alg)
params_dict['params'] = {}
params_dict['test_alg_label'] = 'Test'
del params_dict['alg_proportion']
initExp_dict['args']['alg_list'].append(params_dict)
proportions_dict = {}
proportions_dict['alg_label'] = alg['alg_label']
proportions_dict['proportion'] = alg['alg_proportion']
initExp_dict['args']['algorithm_management_settings']['params']['proportions'].append(proportions_dict)
# Make request for initExp
try:
url = "http://"+config.NEXT_BACKEND_HOST+":"+config.NEXT_BACKEND_PORT+"/api/experiment"
response = requests.post(url,
json.dumps(initExp_dict),
headers={'content-type':'application/json'})
response_dict = eval(response.text)
except:
exc_class, exc, tb = sys.exc_info()
new_exc = Exception("%s. Error connecting to backend."%(exc or exc_class))
raise new_exc.__class__,new_exc, tb
return response_dict
def get_query(self, app_id, exp_uid, widget_key, args=None):
"""
Render custom query for app type
"""
# Make this more flexible
next_backend_url = "http://"+config.NEXT_BACKEND_GLOBAL_HOST+":"+config.NEXT_BACKEND_GLOBAL_PORT
# pass in cookie dependent data
requested_experiment = Experiment.objects(exp_uid=exp_uid)[0]
query_tries = requested_experiment.query_tries
debrief = requested_experiment.debrief
instructions = requested_experiment.instructions
template = env.get_template("query.html")
return render_template(template,
app_id=app_id,
exp_uid = exp_uid,
widget_key = widget_key,
next_backend_url=next_backend_url,
query_tries = query_tries,
debrief = debrief,
instructions = instructions)
|
apache-2.0
|
bpramod/azure-linux-extensions
|
CustomScript/test/test_file_download.py
|
8
|
1456
|
#!/usr/bin/env python
#
#CustomScript extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import tempfile
import customscript as cs
class TestFileDownload(unittest.TestCase):
def test_download_blob(self):
pass
def download_to_tmp(self, uri):
tmpFile = tempfile.TemporaryFile()
file_path = os.path.abspath(tmpFile.name)
cs.download_and_save_file(uri, file_path)
file_size = os.path.getsize(file_path)
self.assertNotEqual(file_size, 0)
tmpFile.close()
os.unlink(tmpFile.name)
def test_download_bin_file(self):
uri = "http://www.bing.com/rms/Homepage$HPBottomBrand_default/ic/1f76acf2/d3a8cfeb.png"
self.download_to_tmp(uri)
def test_download_text_file(self):
uri = "http://www.bing.com/"
self.download_to_tmp(uri)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
oberlin/django
|
django/conf/locale/ml/formats.py
|
1007
|
1815
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
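# Quick illustration (not part of Django itself): the first matching pattern above is the
# one that ends up parsing a submitted value, e.g. with the plain strftime machinery:
#   >>> from datetime import datetime
#   >>> datetime.strptime('10/25/2006', '%m/%d/%Y').date()
#   datetime.date(2006, 10, 25)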
|
bsd-3-clause
|
anjel-ershova/python_training
|
fixture/fixture_group.py
|
1
|
4896
|
from model.model_group import Group
import random
class GroupHelper:
def __init__(self, app):
self.app = app
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def select_first_group(self):
wd = self.app.wd
self.select_group_by_index(0)
def edit_if_not_none(self, field, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field).click()
wd.find_element_by_name(field).clear()
wd.find_element_by_name(field).send_keys(text)
else:
pass
def fill_group_form(self, group):
wd = self.app.wd
self.edit_if_not_none("group_name", group.name)
self.edit_if_not_none("group_header", group.header)
self.edit_if_not_none("group_footer", group.footer)
def create(self, group):
wd = self.app.wd
# open_groups_page
wd.find_element_by_link_text("groups").click()
# init group creation
wd.find_element_by_name("new").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
self.group_cache = None
def edit_first_group(self, new_group_data):
wd = self.app.wd
self.edit_group_by_index(0, new_group_data)
def edit_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_index(index)
# click edit button
wd.find_element_by_name("edit").click()
self.fill_group_form(new_group_data)
# submit edition
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def edit_group_by_id(self, id, new_group_data):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_id(id)
# click edit button
wd.find_element_by_name("edit").click()
self.fill_group_form(new_group_data)
# submit edition
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def add_selected_contact_to_selected_group_by_id(self, target_group):
wd = self.app.wd
# open the drop-down list
to_group = wd.find_element_by_name("to_group")
to_group.click()
# pick an arbitrary group by its value
to_group.find_element_by_css_selector("[value='%s']" % target_group.id).click()
wd.find_element_by_name("add").click()
def select_some_group_to_view(self, target_group):
wd = self.app.wd
# open the drop-down list
view_group = wd.find_element_by_name("group")
view_group.click()
# pick an arbitrary group by its value
view_group.find_element_by_css_selector("[value='%s']" % target_group.id).click()
# def click_add_contact_to_group_button(self):
# wd = self.app.wd
# wd.find_element_by_name("add").click()
# self.app.navigation.open_home_page()
def delete_first_group(self):
wd = self.app.wd
self.delete_group_by_index(0)
def delete_group_by_index(self, index):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_index(index)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def delete_group_by_id(self, id):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_id(id)
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def count(self):
wd = self.app.wd
self.app.navigation.open_groups_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.app.navigation.open_groups_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = int(element.find_element_by_name("selected[]").get_attribute("value"))
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)
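    # Usage sketch (hypothetical; assumes the helper is exposed as app.group, which is
    # the usual wiring in this kind of fixture): every mutating method above sets
    # group_cache to None, so the next get_group_list() re-reads the page:
    #   app.group.create(Group(name="g1"))
    #   groups = app.group.get_group_list()  # cache was invalidated, list is re-fetched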
|
apache-2.0
|
raphaelfruneaux/thumbor
|
thumbor/detectors/local_detector.py
|
14
|
2401
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from os.path import join, dirname, abspath, isabs
try:
import cv
except ImportError:
import cv2.cv as cv
from thumbor.point import FocalPoint
from thumbor.detectors import BaseDetector
class CascadeLoaderDetector(BaseDetector):
def load_cascade_file(self, module_path, cascade_file_path):
if not hasattr(self.__class__, 'cascade'):
if isabs(cascade_file_path):
cascade_file = cascade_file_path
else:
cascade_file = join(abspath(dirname(module_path)), cascade_file_path)
self.__class__.cascade = cv.Load(cascade_file)
def get_min_size_for(self, size):
ratio = int(min(size) / 15)
ratio = max(20, ratio)
return (ratio, ratio)
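    # Worked example of the heuristic above: for a 640x480 image min(size) // 15 == 32,
    # so objects smaller than 32x32 are ignored; for very small images the floor of
    # (20, 20) applies instead.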
def get_features(self):
engine = self.context.modules.engine
mode, converted_image = engine.image_data_as_rgb(False)
size = engine.size
image = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 3)
cv.SetData(image, converted_image)
gray = cv.CreateImage(size, 8, 1)
convert_mode = getattr(cv, 'CV_%s2GRAY' % mode)
cv.CvtColor(image, gray, convert_mode)
min_size = self.get_min_size_for(size)
haar_scale = 1.2
min_neighbors = 3
cv.EqualizeHist(gray, gray)
faces = cv.HaarDetectObjects(
gray,
self.__class__.cascade, cv.CreateMemStorage(0),
haar_scale, min_neighbors,
cv.CV_HAAR_DO_CANNY_PRUNING, min_size)
faces_scaled = []
for ((x, y, w, h), n) in faces:
# the input to cv.HaarDetectObjects was resized, so scale the
# bounding box of each face and convert it to two CvPoints
x2, y2 = (x + w), (y + h)
faces_scaled.append(((x, y, x2 - x, y2 - y), n))
return faces_scaled
def detect(self, callback):
features = self.get_features()
if features:
for square, neighbors in features:
self.context.request.focal_points.append(FocalPoint.from_square(*square))
callback()
else:
self.next(callback)
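# Sketch of a concrete detector built on top of the loader above (class name and cascade
# file are illustrative, not defined in this module):
#
#   class FaceDetector(CascadeLoaderDetector):
#       def detect(self, callback):
#           self.load_cascade_file(__file__, 'haarcascade_frontalface_default.xml')
#           super(FaceDetector, self).detect(callback)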
|
mit
|
subdigital/nsdateformatter.com
|
node_modules/node-gyp/gyp/pylib/gyp/common.py
|
64
|
20173
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
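# Example use of the decorator above (illustrative only): repeated calls with the same
# immutable arguments are served from self.cache instead of being recomputed.
#
#   @memoize
#   def Fib(n):
#     return n if n < 2 else Fib(n - 1) + Fib(n - 2)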
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# If |follow_symlink_path| is true (default) and |path| is a symlink, then
# this method returns a path to the real file represented by |path|. If it is
# false, this method returns a path to the symlink. If |path| is not a
# symlink, this option has no effect.
# Convert to normalized (and therefore absolute paths).
if follow_path_symlink:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
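# Worked example of the computation above (assuming no symlinks are involved):
# RelativePath('/a/b/c', '/a/d') splits into ['', 'a', 'b', 'c'] and ['', 'a', 'd'],
# shares a common prefix of length 2, and therefore returns '../b/c'.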
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
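# A few illustrative results of the encoding above (derived from the _quote/_escape
# patterns rather than captured from a run):
#   EncodePOSIXShellArgument('abc')       -> abc           (nothing special, left bare)
#   EncodePOSIXShellArgument('two words') -> "two words"   (space forces double quotes)
#   EncodePOSIXShellArgument('say "hi"')  -> "say \"hi\""  (quoted, embedded " escaped)
#   EncodePOSIXShellArgument('')          -> ""             (empty string must be quoted)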
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
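# Usage sketch (how generators typically drive this; names are illustrative):
#   out = WriteOnDiff(output_path)
#   out.write(generated_contents)
#   out.close()  # only replaces output_path if the contents actually changed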
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('netbsd'):
return 'netbsd'
if sys.platform.startswith('aix'):
return 'aix'
if sys.platform.startswith('zos'):
return 'zos'
if sys.platform.startswith('os390'):
return 'zos'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
return re.findall(r'\$\(([^)]+)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
|
mit
|
tongwang01/tensorflow
|
tensorflow/models/rnn/ptb/reader.py
|
24
|
3992
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for parsing PTB text files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import tensorflow as tf
def _read_words(filename):
with tf.gfile.GFile(filename, "r") as f:
return f.read().decode("utf-8").replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
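# Example of the mapping built above: for the token stream ["the", "cat", "the"] the
# counter is {"the": 2, "cat": 1}; sorting by (-count, word) gives ids
# {"the": 0, "cat": 1}, i.e. more frequent words get smaller ids.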
def _file_to_word_ids(filename, word_to_id):
data = _read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
def ptb_raw_data(data_path=None):
"""Load PTB raw data from data directory "data_path".
Reads PTB text files, converts strings to integer ids,
and performs mini-batching of the inputs.
The PTB dataset comes from Tomas Mikolov's webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Args:
data_path: string path to the directory where simple-examples.tgz has
been extracted.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
"""
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
return train_data, valid_data, test_data, vocabulary
def ptb_producer(raw_data, batch_size, num_steps, name=None):
"""Iterate on the raw PTB data.
This chunks up raw_data into batches of examples and returns Tensors that
are drawn from these batches.
Args:
raw_data: one of the raw data outputs from ptb_raw_data.
batch_size: int, the batch size.
num_steps: int, the number of unrolls.
name: the name of this operation (optional).
Returns:
A pair of Tensors, each shaped [batch_size, num_steps]. The second element
of the tuple is the same data time-shifted to the right by one.
Raises:
tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
"""
with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
data_len = tf.size(raw_data)
batch_len = data_len // batch_size
data = tf.reshape(raw_data[0 : batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
assertion = tf.assert_positive(
epoch_size,
message="epoch_size == 0, decrease batch_size or num_steps")
with tf.control_dependencies([assertion]):
epoch_size = tf.identity(epoch_size, name="epoch_size")
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])
y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])
return x, y
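# Minimal usage sketch (paths and sizes below are placeholders, not from this module):
#   train_data, valid_data, test_data, vocab = ptb_raw_data("/tmp/simple-examples/data")
#   x, y = ptb_producer(train_data, batch_size=20, num_steps=35)
#   # x and y are int32 Tensors shaped [20, 35]; y is x shifted one step ahead in time.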
|
apache-2.0
|
40223112/w16test
|
static/Brython3.1.3-20150514-095342/Lib/os.py
|
635
|
35582
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt', 'os2' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
__all__.append('_exit')
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
try:
from os2 import _have_functions
except ImportError:
pass
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
__all__.append('_exit')
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
try:
from ce import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
    # anyway, as a stub that always returns ENOTSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def _get_masked_mode(mode):
mask = umask(0)
umask(mask)
return mode & ~mask
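# Editor's note (illustrative, not part of the stdlib source): _get_masked_mode()
# applies the process umask to a requested mode. For example, with a typical
# umask of 0o022:
#
#     _get_masked_mode(0o777)   # -> 0o755, i.e. 0o777 & ~0o022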
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(path [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. If the
    target directory already exists with the same mode as specified, an
    OSError is raised if exist_ok is False; otherwise no exception is
    raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError as e:
dir_exists = path.isdir(name)
expected_mode = _get_masked_mode(mode)
if dir_exists:
# S_ISGID is automatically copied by the OS from parent to child
# directories on mkdir. Don't consider it being set to be a mode
# mismatch as mkdir does not unset it when not specified in mode.
actual_mode = st.S_IMODE(lstat(name).st_mode) & ~st.S_ISGID
else:
actual_mode = -1
if not (e.errno == errno.EEXIST and exist_ok and dir_exists and
actual_mode == expected_mode):
if dir_exists and actual_mode != expected_mode:
e.strerror += ' (mode %o != expected mode %o)' % (
actual_mode, expected_mode)
raise
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
    Note: this function can fail, leaving the new directory structure in
    place, if you lack permissions needed to unlink the leaf directory or
    file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except FileNotFoundError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except FileNotFoundError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except error as err:
if onerror is not None:
onerror(err)
return
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except error as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
# Change environ to automatically call putenv() and unsetenv() if they exist.
from collections.abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
__all__.append("unsetenv")
def _createenviron():
if name in ('os2', 'nt'):
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
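# Illustrative sketch (editor's example, not part of the stdlib source): working
# with the environment mapping created above. 'MY_VAR' is a placeholder name.
#
#     import os
#     os.environ['MY_VAR'] = 'value'          # also calls putenv() when available
#     print(os.getenv('MY_VAR', 'default'))   # -> 'value'
#     del os.environ['MY_VAR']                # also calls unsetenv() when available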
supports_bytes_environ = name not in ('os2', 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
errors = 'strict'
else:
errors = 'surrogateescape'
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
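# Illustrative sketch (editor's example, not part of the stdlib source): round-
# tripping a filename through the filesystem encoding with the helpers above.
# The filename is a placeholder; the byte value assumes a UTF-8 filesystem.
#
#     import os
#     raw = os.fsencode('café.txt')          # -> b'caf\xc3\xa9.txt' on a UTF-8 system
#     assert os.fsdecode(raw) == 'café.txt'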
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
import copyreg as _copyreg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copyreg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copyreg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
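# Illustrative sketch (editor's example, not part of the stdlib source): reading
# a command's output through os.popen(). 'echo hello' is a placeholder command.
#
#     import os
#     with os.popen('echo hello') as pipe:
#         print(pipe.read().strip())   # -> hello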
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
|
agpl-3.0
|
aerickson/OctoPrint
|
src/octoprint/server/api/timelapse.py
|
9
|
2551
|
# coding=utf-8
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os
from flask import request, jsonify, url_for
from werkzeug.utils import secure_filename
import octoprint.timelapse
import octoprint.util as util
from octoprint.settings import settings, valid_boolean_trues
from octoprint.server import restricted_access, admin_permission
from octoprint.server.util import redirectToTornado
from octoprint.server.api import api
#~~ timelapse handling
@api.route("/timelapse", methods=["GET"])
def getTimelapseData():
timelapse = octoprint.timelapse.current
config = {"type": "off"}
if timelapse is not None and isinstance(timelapse, octoprint.timelapse.ZTimelapse):
config["type"] = "zchange"
config["postRoll"] = timelapse.postRoll()
elif timelapse is not None and isinstance(timelapse, octoprint.timelapse.TimedTimelapse):
config["type"] = "timed"
config["postRoll"] = timelapse.postRoll()
config.update({
"interval": timelapse.interval()
})
files = octoprint.timelapse.getFinishedTimelapses()
for file in files:
file["url"] = url_for("index") + "downloads/timelapse/" + file["name"]
return jsonify({
"config": config,
"files": files
})
@api.route("/timelapse/<filename>", methods=["GET"])
def downloadTimelapse(filename):
return redirectToTornado(request, url_for("index") + "downloads/timelapse/" + filename)
@api.route("/timelapse/<filename>", methods=["DELETE"])
@restricted_access
def deleteTimelapse(filename):
if util.isAllowedFile(filename, {"mpg"}):
secure = os.path.join(settings().getBaseFolder("timelapse"), secure_filename(filename))
if os.path.exists(secure):
os.remove(secure)
return getTimelapseData()
@api.route("/timelapse", methods=["POST"])
@restricted_access
def setTimelapseConfig():
if "type" in request.values:
config = {
"type": request.values["type"],
"postRoll": 0,
"options": {}
}
if "postRoll" in request.values:
try:
config["postRoll"] = int(request.values["postRoll"])
except ValueError:
pass
if "interval" in request.values:
interval = 10
try:
interval = int(request.values["interval"])
except ValueError:
pass
config["options"] = {
"interval": interval
}
if admin_permission.can() and "save" in request.values and request.values["save"] in valid_boolean_trues:
octoprint.timelapse.configureTimelapse(config, True)
else:
octoprint.timelapse.configureTimelapse(config)
return getTimelapseData()
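#~~ Editor's example (illustrative, not part of OctoPrint): configuring the
# timelapse through this API with the requests library. Host and API key are
# placeholders.
#
#     import requests
#     requests.post("http://octopi.local/api/timelapse",
#                   headers={"X-Api-Key": "<api key>"},
#                   data={"type": "timed", "postRoll": 5, "interval": 10})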
|
agpl-3.0
|
cyberark-bizdev/ansible
|
lib/ansible/modules/cloud/openstack/os_keystone_endpoint.py
|
20
|
6464
|
#!/usr/bin/python
# Copyright: (c) 2017, VEXXHOST, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_endpoint
short_description: Manage OpenStack Identity service endpoints
extends_documentation_fragment: openstack
author:
- Mohammed Naser (@mnaser)
- Alberto Murillo (@albertomurillo)
version_added: "2.5"
description:
- Create, update, or delete OpenStack Identity service endpoints. If a
service with the same combination of I(service), I(interface) and I(region)
      exists, the I(url) and I(state) (C(present) or C(absent)) will be updated.
options:
service:
description:
- Name or id of the service.
required: true
   endpoint_interface:
description:
- Interface of the service.
choices: [admin, public, internal]
required: true
url:
description:
- URL of the service.
required: true
region:
description:
- Region that the service belongs to. Note that I(region_name) is used for authentication.
enabled:
description:
- Is the service enabled.
default: True
state:
description:
- Should the resource be C(present) or C(absent).
choices: [present, absent]
default: present
requirements:
- shade >= 1.11.0
'''
EXAMPLES = '''
- name: Create a service for glance
os_keystone_endpoint:
cloud: mycloud
service: glance
endpoint_interface: public
url: http://controller:9292
region: RegionOne
state: present
- name: Delete a service for nova
os_keystone_endpoint:
cloud: mycloud
service: nova
endpoint_interface: public
region: RegionOne
state: absent
'''
RETURN = '''
endpoint:
description: Dictionary describing the endpoint.
returned: On success when I(state) is C(present)
type: complex
contains:
id:
description: Endpoint ID.
type: string
sample: 3292f020780b4d5baf27ff7e1d224c44
region:
description: Region Name.
type: string
sample: RegionOne
service_id:
description: Service ID.
type: string
sample: b91f1318f735494a825a55388ee118f3
interface:
description: Endpoint Interface.
type: string
sample: public
url:
description: Service URL.
type: string
sample: http://controller:9292
enabled:
description: Service status.
type: boolean
sample: True
'''
from distutils.version import StrictVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(module, endpoint):
if endpoint.enabled != module.params['enabled']:
return True
if endpoint.url != module.params['url']:
return True
return False
def _system_state_change(module, endpoint):
state = module.params['state']
if state == 'absent' and endpoint:
return True
if state == 'present':
if endpoint is None:
return True
return _needs_update(module, endpoint)
return False
def main():
argument_spec = openstack_full_argument_spec(
service=dict(type='str', required=True),
endpoint_interface=dict(type='str', required=True, choices=['admin', 'public', 'internal']),
url=dict(type='str', required=True),
region=dict(type='str'),
enabled=dict(type='bool', default=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
shade, cloud = openstack_cloud_from_module(module, min_version='1.11.0')
service_name_or_id = module.params['service']
interface = module.params['endpoint_interface']
url = module.params['url']
region = module.params['region']
enabled = module.params['enabled']
state = module.params['state']
try:
cloud = shade.operator_cloud(**module.params)
service = cloud.get_service(service_name_or_id)
if service is None:
module.fail_json(msg='Service %s does not exist' % service_name_or_id)
filters = dict(service_id=service.id, interface=interface)
if region is not None:
filters['region'] = region
endpoints = cloud.search_endpoints(filters=filters)
if len(endpoints) > 1:
module.fail_json(msg='Service %s, interface %s and region %s are '
'not unique' %
(service_name_or_id, interface, region))
elif len(endpoints) == 1:
endpoint = endpoints[0]
else:
endpoint = None
if module.check_mode:
module.exit_json(changed=_system_state_change(module, endpoint))
if state == 'present':
if endpoint is None:
result = cloud.create_endpoint(service_name_or_id=service,
url=url, interface=interface,
region=region, enabled=enabled)
endpoint = result[0]
changed = True
else:
if _needs_update(module, endpoint):
endpoint = cloud.update_endpoint(
endpoint.id, url=url, enabled=enabled)
changed = True
else:
changed = False
module.exit_json(changed=changed, endpoint=endpoint)
elif state == 'absent':
if endpoint is None:
changed = False
else:
cloud.delete_endpoint(endpoint.id)
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
gpl-3.0
|
stefanodoni/HTperf
|
main-live-ack.py
|
2
|
3411
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from functools import partial
from asyncio import get_event_loop, Future, Queue
from toolrack.script import Script
from toolrack.async import ProcessParserProtocol
class CommandOutputParser:
'''Parse process output and push parsed samples to a queue.'''
def __init__(self, loop, queue):
self._loop = loop
self._queue = queue
async def __call__(self, parser):
        '''Run a process and parse its output using the parser.'''
future = Future(loop=self._loop)
parser_func = partial(self._out_parser, parser.parse_line)
protocol = ProcessParserProtocol(future, out_parser=parser_func)
transport, _ = await self._loop.subprocess_exec(
lambda: protocol, *parser.cmdline)
result = await future
transport.close()
await self._queue.put(None) # To end processing
return result
def _out_parser(self, parser_func, line):
value = parser_func(line)
if value is not None:
self._queue.put_nowait(value)
class Collector:
'''Collect samples from a queue and process them.'''
def __init__(self, queue, sample_count):
self._queue = queue
self._sample_count = sample_count
self._samples = []
async def __call__(self, processor):
'''Process samples from a queue using the processor.'''
while True:
sample = await self._queue.get()
if sample is None:
self._process_pending_samples(processor)
return
self._samples.append(sample)
if len(self._samples) == self._sample_count:
self._process_pending_samples(processor)
def _process_pending_samples(self, processor):
'''Process the list of pending samples.'''
samples, self._samples = self._samples, []
if samples:
processor.process(samples)
class ProcessParser:
'''Parse a process output line by line.'''
cmdline = ['vmstat', '1']
#cmdline = ['sudo', 'perf', 'stat', '-I', '1000', '-aAe', 'cycles,instructions']
def parse_line(self, line):
'''Parse a line of process output.
It must return the sample to queue.
'''
tokens = line.split()
try:
return int(tokens[3])
except ValueError:
pass
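# Illustrative sketch (editor's example, not part of the original script): what
# parse_line() extracts from a typical `vmstat 1` data row (column index 3 is
# the free-memory column):
#
#     line = " 1  0      0 812436 123456 654321    0    0     5    12  110  220  3  1 96  0  0"
#     ProcessParser().parse_line(line)   # -> 812436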
class SampleProcessor:
'''Process samples.'''
def process(self, samples):
'''Process a list of samples.'''
print(float(sum(samples)) / len(samples))
class AggregatorScript(Script):
def get_parser(self):
parser = ArgumentParser(
description='Parse process output and aggregate samples.')
parser.add_argument(
'-c', '--sample-count', type=int, default=5,
help='number of samples to aggregate')
return parser
def main(self, args):
loop = get_event_loop()
loop.run_until_complete(self._main(loop, args))
loop.close()
async def _main(self, loop, args):
queue = Queue(loop=loop)
parser = CommandOutputParser(loop, queue)
collector = Collector(queue, args.sample_count)
task = loop.create_task(collector(SampleProcessor()))
await parser(ProcessParser())
await task
if __name__ == '__main__':
script = AggregatorScript()
script()
|
gpl-2.0
|
bb111189/Arky2
|
boilerplate/external/pycountry/__init__.py
|
1
|
3459
|
# vim:fileencoding=utf-8
# Copyright (c) 2008-2011 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
"""pycountry"""
import os.path
import pycountry.db
LOCALES_DIR = os.path.join(os.path.dirname(__file__), 'locales')
DATABASE_DIR = os.path.join(os.path.dirname(__file__), 'databases')
class Countries(pycountry.db.Database):
"""Provides access to an ISO 3166 database (Countries)."""
field_map = dict(alpha_2_code='alpha2',
alpha_3_code='alpha3',
numeric_code='numeric',
name='name',
official_name='official_name',
common_name='common_name')
data_class_name = 'Country'
xml_tag = 'iso_3166_entry'
class Scripts(pycountry.db.Database):
"""Providess access to an ISO 15924 database (Scripts)."""
field_map = dict(alpha_4_code='alpha4',
numeric_code='numeric',
name='name')
data_class_name = 'Script'
xml_tag = 'iso_15924_entry'
class Currencies(pycountry.db.Database):
"""Providess access to an ISO 4217 database (Currencies)."""
field_map = dict(letter_code='letter',
numeric_code='numeric',
currency_name='name')
data_class_name = 'Currency'
xml_tag = 'iso_4217_entry'
class Languages(pycountry.db.Database):
"""Providess access to an ISO 639-1/2 database (Languages)."""
field_map = dict(iso_639_2B_code='bibliographic',
iso_639_2T_code='terminology',
iso_639_1_code='alpha2',
common_name='common_name',
name='name')
data_class_name = 'Language'
xml_tag = 'iso_639_entry'
class Subdivision(pycountry.db.Data):
parent_code = None
def __init__(self, element, **kw):
super(Subdivision, self).__init__(element, **kw)
self.type = element.parentNode.attributes.get('type').value
self.country_code = self.code.split('-')[0]
if self.parent_code is not None:
self.parent_code = '%s-%s' % (self.country_code, self.parent_code)
@property
def country(self):
return countries.get(alpha2=self.country_code)
@property
def parent(self):
return subdivisions.get(code=self.parent_code)
class Subdivisions(pycountry.db.Database):
# Note: subdivisions can be hierarchical to other subdivisions. The
    # parent_code attribute is related to other subdivisions, *not*
# the country!
xml_tag = 'iso_3166_2_entry'
data_class_base = Subdivision
data_class_name = 'Subdivision'
field_map = dict(code='code',
name='name',
parent='parent_code')
no_index = ['name', 'parent_code']
def __init__(self, *args, **kw):
super(Subdivisions, self).__init__(*args, **kw)
# Add index for the country code.
self.indices['country_code'] = {}
for subdivision in self:
divs = self.indices['country_code'].setdefault(
subdivision.country_code, set())
divs.add(subdivision)
countries = Countries(os.path.join(DATABASE_DIR, 'iso3166.xml'))
scripts = Scripts(os.path.join(DATABASE_DIR, 'iso15924.xml'))
currencies = Currencies(os.path.join(DATABASE_DIR, 'iso4217.xml'))
languages = Languages(os.path.join(DATABASE_DIR, 'iso639.xml'))
subdivisions = Subdivisions(os.path.join(DATABASE_DIR, 'iso3166_2.xml'))
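# Illustrative sketch (editor's example, not part of pycountry): looking up
# entries through the database objects created above.
#
#     import pycountry
#     pycountry.countries.get(alpha2='DE').name   # -> 'Germany'
#     pycountry.languages.get(alpha2='de').name   # -> 'German'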
|
lgpl-3.0
|
malept/youtube-dl
|
youtube_dl/extractor/mangomolo.py
|
43
|
1834
|
# coding: utf-8
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
int_or_none,
)
class MangomoloBaseIE(InfoExtractor):
def _get_real_id(self, page_id):
return page_id
def _real_extract(self, url):
page_id = self._get_real_id(self._match_id(url))
webpage = self._download_webpage(url, page_id)
hidden_inputs = self._hidden_inputs(webpage)
m3u8_entry_protocol = 'm3u8' if self._IS_LIVE else 'm3u8_native'
format_url = self._html_search_regex(
[
r'file\s*:\s*"(https?://[^"]+?/playlist.m3u8)',
r'<a[^>]+href="(rtsp://[^"]+)"'
], webpage, 'format url')
formats = self._extract_wowza_formats(
format_url, page_id, m3u8_entry_protocol, ['smil'])
self._sort_formats(formats)
return {
'id': page_id,
'title': self._live_title(page_id) if self._IS_LIVE else page_id,
'uploader_id': hidden_inputs.get('userid'),
'duration': int_or_none(hidden_inputs.get('duration')),
'is_live': self._IS_LIVE,
'formats': formats,
}
class MangomoloVideoIE(MangomoloBaseIE):
IE_NAME = 'mangomolo:video'
_VALID_URL = r'https?://admin\.mangomolo\.com/analytics/index\.php/customers/embed/video\?.*?\bid=(?P<id>\d+)'
_IS_LIVE = False
class MangomoloLiveIE(MangomoloBaseIE):
IE_NAME = 'mangomolo:live'
_VALID_URL = r'https?://admin\.mangomolo\.com/analytics/index\.php/customers/embed/index\?.*?\bchannelid=(?P<id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)'
_IS_LIVE = True
def _get_real_id(self, page_id):
return base64.b64decode(compat_urllib_parse_unquote(page_id).encode()).decode()
|
unlicense
|
abilian/abilian-core
|
src/abilian/core/models/tests/test_blob.py
|
1
|
3462
|
""""""
import uuid
from io import StringIO
from flask import Flask
from abilian.core.models.blob import Blob
from abilian.core.sqlalchemy import SQLAlchemy
from abilian.services import repository_service as repository
from abilian.services import session_repository_service as session_repository
#
# Unit tests
#
def test_auto_uuid() -> None:
blob = Blob()
assert blob.uuid is not None
assert isinstance(blob.uuid, uuid.UUID)
# test provided uuid is not replaced by a new one
u = uuid.UUID("4f80f02f-52e3-4fe2-b9f2-2c3e99449ce9")
blob = Blob(uuid=u)
assert isinstance(blob.uuid, uuid.UUID)
    assert blob.uuid == u
def test_meta() -> None:
blob = Blob()
assert blob.meta == {}
#
# Integration tests
#
def test_md5(app: Flask, db: SQLAlchemy) -> None:
blob = Blob("test md5")
assert "md5" in blob.meta
assert blob.meta["md5"] == "0e4e3b2681e8931c067a23c583c878d5"
def test_size(app: Flask, db: SQLAlchemy) -> None:
blob = Blob("test")
assert blob.size == 4
def test_filename(app: Flask, db: SQLAlchemy) -> None:
content = StringIO("test")
content.filename = "test.txt"
blob = Blob(content)
assert "filename" in blob.meta
assert blob.meta["filename"] == "test.txt"
def test_mimetype(app: Flask, db: SQLAlchemy) -> None:
content = StringIO("test")
content.content_type = "text/plain"
blob = Blob(content)
assert "mimetype" in blob.meta
assert blob.meta["mimetype"] == "text/plain"
def test_nonzero(app: Flask, db: SQLAlchemy) -> None:
blob = Blob("test md5")
assert bool(blob)
# change uuid: repository will return None for blob.file
blob.uuid = uuid.uuid4()
assert not bool(blob)
# def test_query(app, db):
# session = db.session
# content = b"content"
# b = Blob(content)
# session.add(b)
# session.flush()
#
# assert Blob.query.by_uuid(b.uuid) is b
# assert Blob.query.by_uuid(str(b.uuid)) is b
#
# u = uuid.uuid4()
# assert Blob.query.by_uuid(u) is None
def test_value(app: Flask, db: SQLAlchemy) -> None:
session = db.session
content = b"content"
blob = Blob(content)
tr = session.begin(nested=True)
session.add(blob)
tr.commit()
assert repository.get(blob.uuid) is None
assert session_repository.get(blob, blob.uuid).open("rb").read() == content
assert blob.value == content
session.commit()
assert repository.get(blob.uuid).open("rb").read() == content
assert blob.value == content
session.begin(nested=True) # match session.rollback
with session.begin(nested=True):
session.delete(blob)
# object marked for deletion, but instance attribute should still be
# readable
fd = session_repository.get(blob, blob.uuid).open("rb")
assert fd.read() == content
# commit in transaction: session_repository has no content, 'physical'
# repository still has content
assert session_repository.get(blob, blob.uuid) is None
assert repository.get(blob.uuid).open("rb").read() == content
# rollback: session_repository has content again
session.rollback()
assert session_repository.get(blob, blob.uuid).open("rb").read() == content
session.delete(blob)
session.flush()
assert session_repository.get(blob, blob.uuid) is None
assert repository.get(blob.uuid).open("rb").read() == content
session.commit()
assert repository.get(blob.uuid) is None
|
lgpl-2.1
|
Phonemetra/TurboCoin
|
test/functional/rpc_getblockstats.py
|
1
|
6826
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import TurbocoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(TurbocoinTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].generate(101)
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
        # Make sure invalid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
|
mit
|
Universal-Model-Converter/UMC3.0a
|
data/Python/x86/Lib/site-packages/OpenGL/GL/NV/shader_buffer_store.py
|
2
|
1943
|
'''OpenGL extension NV.shader_buffer_store
This module customises the behaviour of the
OpenGL.raw.GL.NV.shader_buffer_store to provide a more
Python-friendly API
Overview (from the spec)
This extension builds upon the mechanisms added by the
NV_shader_buffer_load extension to allow shaders to perform random-access
reads to buffer object memory without using dedicated buffer object
binding points. Instead, it allowed an application to make a buffer
object resident, query a GPU address (pointer) for the buffer object, and
then use that address as a pointer in shader code. This approach allows
shaders to access a large number of buffer objects without needing to
repeatedly bind buffers to a limited number of fixed-functionality binding
points.
This extension lifts the restriction from NV_shader_buffer_load that
disallows writes. In particular, the MakeBufferResidentNV function now
allows READ_WRITE and WRITE_ONLY access modes, and the shading language is
extended to allow shaders to write through (GPU address) pointers.
Additionally, the extension provides built-in functions to perform atomic
memory transactions to buffer object memory.
As with the shader writes provided by the EXT_shader_image_load_store
extension, writes to buffer object memory using this extension are weakly
ordered to allow for parallel or distributed shader execution. The
EXT_shader_image_load_store extension provides mechanisms allowing for
finer control of memory transaction order, and those mechanisms apply
equally to buffer object stores using this extension.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/shader_buffer_store.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.shader_buffer_store import *
### END AUTOGENERATED SECTION
|
mit
|
ddzialak/boto
|
tests/integration/s3/test_bucket.py
|
88
|
12516
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the S3 Bucket
"""
from mock import patch, Mock
import unittest
import time
from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection
from boto.s3.bucketlogging import BucketLogging
from boto.s3.lifecycle import Lifecycle
from boto.s3.lifecycle import Transition
from boto.s3.lifecycle import Expiration
from boto.s3.lifecycle import Rule
from boto.s3.acl import Grant
from boto.s3.tagging import Tags, TagSet
from boto.s3.website import RedirectLocation
from boto.compat import urllib
class S3BucketTest (unittest.TestCase):
s3 = True
def setUp(self):
self.conn = S3Connection()
self.bucket_name = 'bucket-%d' % int(time.time())
self.bucket = self.conn.create_bucket(self.bucket_name)
def tearDown(self):
for key in self.bucket:
key.delete()
self.bucket.delete()
def test_next_marker(self):
expected = ["a/", "b", "c"]
for key_name in expected:
key = self.bucket.new_key(key_name)
key.set_contents_from_string(key_name)
# Normal list of first 2 keys will have
# no NextMarker set, so we use last key to iterate
# last element will be "b" so no issue.
rs = self.bucket.get_all_keys(max_keys=2)
for element in rs:
pass
self.assertEqual(element.name, "b")
self.assertEqual(rs.next_marker, None)
# list using delimiter of first 2 keys will have
# a NextMarker set (when truncated). As prefixes
# are grouped together at the end, we get "a/" as
# last element, but luckily we have next_marker.
rs = self.bucket.get_all_keys(max_keys=2, delimiter="/")
for element in rs:
pass
self.assertEqual(element.name, "a/")
self.assertEqual(rs.next_marker, "b")
# ensure bucket.list() still works by just
# popping elements off the front of expected.
rs = self.bucket.list()
for element in rs:
self.assertEqual(element.name, expected.pop(0))
self.assertEqual(expected, [])
def test_list_with_url_encoding(self):
expected = ["α", "β", "γ"]
for key_name in expected:
key = self.bucket.new_key(key_name)
key.set_contents_from_string(key_name)
# ensure bucket.list() still works by just
# popping elements off the front of expected.
orig_getall = self.bucket._get_all
getall = lambda *a, **k: orig_getall(*a, max_keys=2, **k)
with patch.object(self.bucket, '_get_all', getall):
rs = self.bucket.list(encoding_type="url")
for element in rs:
name = urllib.parse.unquote(element.name.encode('utf-8'))
self.assertEqual(name, expected.pop(0))
self.assertEqual(expected, [])
def test_logging(self):
# use self.bucket as the target bucket so that teardown
# will delete any log files that make it into the bucket
# automatically and all we have to do is delete the
# source bucket.
sb_name = "src-" + self.bucket_name
sb = self.conn.create_bucket(sb_name)
# grant log write perms to target bucket using canned-acl
self.bucket.set_acl("log-delivery-write")
target_bucket = self.bucket_name
target_prefix = u"jp/ログ/"
# Check existing status is disabled
bls = sb.get_logging_status()
self.assertEqual(bls.target, None)
# Create a logging status and grant auth users READ PERM
authuri = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
authr = Grant(permission="READ", type="Group", uri=authuri)
sb.enable_logging(target_bucket, target_prefix=target_prefix, grants=[authr])
# Check the status and confirm its set.
bls = sb.get_logging_status()
self.assertEqual(bls.target, target_bucket)
self.assertEqual(bls.prefix, target_prefix)
self.assertEqual(len(bls.grants), 1)
self.assertEqual(bls.grants[0].type, "Group")
self.assertEqual(bls.grants[0].uri, authuri)
# finally delete the src bucket
sb.delete()
def test_tagging(self):
tagging = """
<Tagging>
<TagSet>
<Tag>
<Key>tagkey</Key>
<Value>tagvalue</Value>
</Tag>
</TagSet>
</Tagging>
"""
self.bucket.set_xml_tags(tagging)
response = self.bucket.get_tags()
self.assertEqual(response[0][0].key, 'tagkey')
self.assertEqual(response[0][0].value, 'tagvalue')
self.bucket.delete_tags()
try:
self.bucket.get_tags()
except S3ResponseError as e:
self.assertEqual(e.code, 'NoSuchTagSet')
except Exception as e:
self.fail("Wrong exception raised (expected S3ResponseError): %s"
% e)
else:
self.fail("Expected S3ResponseError, but no exception raised.")
def test_tagging_from_objects(self):
"""Create tags from python objects rather than raw xml."""
t = Tags()
tag_set = TagSet()
tag_set.add_tag('akey', 'avalue')
tag_set.add_tag('anotherkey', 'anothervalue')
t.add_tag_set(tag_set)
self.bucket.set_tags(t)
response = self.bucket.get_tags()
self.assertEqual(response[0][0].key, 'akey')
self.assertEqual(response[0][0].value, 'avalue')
self.assertEqual(response[0][1].key, 'anotherkey')
self.assertEqual(response[0][1].value, 'anothervalue')
def test_website_configuration(self):
response = self.bucket.configure_website('index.html')
self.assertTrue(response)
config = self.bucket.get_website_configuration()
self.assertEqual(config, {'WebsiteConfiguration':
{'IndexDocument': {'Suffix': 'index.html'}}})
config2, xml = self.bucket.get_website_configuration_with_xml()
self.assertEqual(config, config2)
self.assertTrue('<Suffix>index.html</Suffix>' in xml, xml)
def test_website_redirect_all_requests(self):
response = self.bucket.configure_website(
redirect_all_requests_to=RedirectLocation('example.com'))
config = self.bucket.get_website_configuration()
self.assertEqual(config, {
'WebsiteConfiguration': {
'RedirectAllRequestsTo': {
'HostName': 'example.com'}}})
# Can configure the protocol as well.
response = self.bucket.configure_website(
redirect_all_requests_to=RedirectLocation('example.com', 'https'))
config = self.bucket.get_website_configuration()
self.assertEqual(config, {
'WebsiteConfiguration': {'RedirectAllRequestsTo': {
'HostName': 'example.com',
'Protocol': 'https',
}}}
)
def test_lifecycle(self):
lifecycle = Lifecycle()
lifecycle.add_rule('myid', '', 'Enabled', 30)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
self.assertEqual(len(response), 1)
actual_lifecycle = response[0]
self.assertEqual(actual_lifecycle.id, 'myid')
self.assertEqual(actual_lifecycle.prefix, '')
self.assertEqual(actual_lifecycle.status, 'Enabled')
self.assertEqual(actual_lifecycle.transition, None)
def test_lifecycle_with_glacier_transition(self):
lifecycle = Lifecycle()
transition = Transition(days=30, storage_class='GLACIER')
rule = Rule('myid', prefix='', status='Enabled', expiration=None,
transition=transition)
lifecycle.append(rule)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
transition = response[0].transition
self.assertEqual(transition.days, 30)
self.assertEqual(transition.storage_class, 'GLACIER')
self.assertEqual(transition.date, None)
def test_lifecycle_multi(self):
date = '2022-10-12T00:00:00.000Z'
sc = 'GLACIER'
lifecycle = Lifecycle()
lifecycle.add_rule("1", "1/", "Enabled", 1)
lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
lifecycle.add_rule("4", "4/", "Enabled", None,
Transition(days=4, storage_class=sc))
lifecycle.add_rule("5", "5/", "Enabled", None,
Transition(date=date, storage_class=sc))
# set the lifecycle
self.bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
        readlifecycle = self.bucket.get_lifecycle_config()
for rule in readlifecycle:
if rule.id == "1":
self.assertEqual(rule.prefix, "1/")
self.assertEqual(rule.expiration.days, 1)
elif rule.id == "2":
self.assertEqual(rule.prefix, "2/")
self.assertEqual(rule.expiration.days, 2)
elif rule.id == "3":
self.assertEqual(rule.prefix, "3/")
self.assertEqual(rule.expiration.date, date)
elif rule.id == "4":
self.assertEqual(rule.prefix, "4/")
self.assertEqual(rule.transition.days, 4)
self.assertEqual(rule.transition.storage_class, sc)
elif rule.id == "5":
self.assertEqual(rule.prefix, "5/")
self.assertEqual(rule.transition.date, date)
self.assertEqual(rule.transition.storage_class, sc)
else:
self.fail("unexpected id %s" % rule.id)
def test_lifecycle_jp(self):
# test lifecycle with Japanese prefix
name = "Japanese files"
prefix = "日本語/"
days = 30
lifecycle = Lifecycle()
lifecycle.add_rule(name, prefix, "Enabled", days)
# set the lifecycle
self.bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
        readlifecycle = self.bucket.get_lifecycle_config()
for rule in readlifecycle:
self.assertEqual(rule.id, name)
self.assertEqual(rule.expiration.days, days)
#Note: Boto seems correct? AWS seems broken?
#self.assertEqual(rule.prefix, prefix)
def test_lifecycle_with_defaults(self):
lifecycle = Lifecycle()
lifecycle.add_rule(expiration=30)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
self.assertEqual(len(response), 1)
actual_lifecycle = response[0]
self.assertNotEqual(len(actual_lifecycle.id), 0)
self.assertEqual(actual_lifecycle.prefix, '')
def test_lifecycle_rule_xml(self):
# create a rule directly with id, prefix defaults
rule = Rule(status='Enabled', expiration=30)
s = rule.to_xml()
# Confirm no ID is set in the rule.
self.assertEqual(s.find("<ID>"), -1)
# Confirm Prefix is '' and not set to 'None'
self.assertNotEqual(s.find("<Prefix></Prefix>"), -1)
|
mit
|
savioabuga/django-geonames-field
|
docs/conf.py
|
1
|
8231
|
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import geonames_field
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-geonames-field'
copyright = u'2015, Savio Abuga'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = geonames_field.__version__
# The full version, including alpha/beta/rc tags.
release = geonames_field.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-geonames-fielddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-geonames-field.tex', u'django-geonames-field Documentation',
u'Savio Abuga', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-geonames-field', u'django-geonames-field Documentation',
[u'Savio Abuga'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-geonames-field', u'django-geonames-field Documentation',
u'Savio Abuga', 'django-geonames-field', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bsd-3-clause
|
chachan/nodeshot
|
nodeshot/core/base/mixins.py
|
5
|
1879
|
"""
reusable restframework mixins for API views
"""
import reversion
from rest_framework.response import Response
class ACLMixin(object):
""" implements ACL in views """
def get_queryset(self):
"""
Returns only objects which are accessible to the current user.
        If the user is not authenticated, all public objects will be returned.
Model must implement AccessLevelManager!
"""
return self.queryset.all().accessible_to(user=self.request.user)
class RevisionUpdate(object):
"""
Mixin that adds compatibility with django reversion for PUT and PATCH requests
"""
def put(self, request, *args, **kwargs):
""" custom put method to support django-reversion """
with reversion.create_revision():
reversion.set_user(request.user)
reversion.set_comment('changed through the RESTful API from ip %s' % request.META['REMOTE_ADDR'])
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
""" custom patch method to support django-reversion """
with reversion.create_revision():
reversion.set_user(request.user)
reversion.set_comment('changed through the RESTful API from ip %s' % request.META['REMOTE_ADDR'])
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
class RevisionCreate(object):
"""
Mixin that adds compatibility with django reversion for POST requests
"""
def post(self, request, *args, **kwargs):
""" custom put method to support django-reversion """
with reversion.create_revision():
reversion.set_user(request.user)
reversion.set_comment('created through the RESTful API from ip %s' % request.META['REMOTE_ADDR'])
return self.create(request, *args, **kwargs)
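# Hypothetical usage sketch (not part of the original module): these mixins are
# designed to be combined with rest_framework generic views, for example:
#
#   class NodeDetail(ACLMixin, RevisionUpdate, generics.RetrieveUpdateAPIView):
#       queryset = Node.objects.all()
#       serializer_class = NodeSerializer
#
# ACLMixin restricts the queryset to objects accessible to the requesting user,
# while RevisionUpdate records PUT/PATCH changes as django-reversion revisions.
# Node and NodeSerializer above are placeholder names, not part of this module.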
|
gpl-3.0
|
sharifmamun/ansible
|
v1/ansible/runner/lookup_plugins/pipe.py
|
162
|
1951
|
# (c) 2012, Daniel Hokka Zakrisson <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from ansible import utils, errors
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if isinstance(terms, basestring):
terms = [ terms ]
ret = []
for term in terms:
'''
http://docs.python.org/2/library/subprocess.html#popen-constructor
The shell argument (which defaults to False) specifies whether to use the
shell as the program to execute. If shell is True, it is recommended to pass
args as a string rather than as a sequence
https://github.com/ansible/ansible/issues/6550
'''
term = str(term)
p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.append(stdout.decode("utf-8").rstrip())
else:
raise errors.AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
return ret
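# Usage sketch (illustrative, not part of this file): within a playbook the
# lookup is invoked as a Jinja2 expression, e.g.
#
#   date_stamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
#
# The command string is executed through the shell and its stdout, stripped of
# trailing whitespace, is returned.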
|
gpl-3.0
|
tuanavu/python-cookbook-3rd
|
src/6/reading_nested_and_variable_sized_binary_structures/example2.py
|
5
|
1825
|
# Example 2: Introduction of a metaclass
import struct
class StructField:
def __init__(self, format, offset):
self.format = format
self.offset = offset
def __get__(self, instance, cls):
if instance is None:
return self
else:
r = struct.unpack_from(self.format,
instance._buffer, self.offset)
return r[0] if len(r) == 1 else r
class StructureMeta(type):
'''
Metaclass that automatically creates StructField descriptors
'''
def __init__(self, clsname, bases, clsdict):
fields = getattr(self, '_fields_', [])
byte_order = ''
offset = 0
for format, fieldname in fields:
if format.startswith(('<','>','!','@')):
byte_order = format[0]
format = format[1:]
format = byte_order + format
setattr(self, fieldname, StructField(format, offset))
offset += struct.calcsize(format)
setattr(self, 'struct_size', offset)
class Structure(metaclass=StructureMeta):
def __init__(self, bytedata):
self._buffer = memoryview(bytedata)
@classmethod
def from_file(cls, f):
return cls(f.read(cls.struct_size))
if __name__ == '__main__':
class PolyHeader(Structure):
_fields_ = [
('<i', 'file_code'),
('d', 'min_x'),
('d', 'min_y'),
('d', 'max_x'),
('d', 'max_y'),
('i', 'num_polys')
]
f = open('polys.bin','rb')
phead = PolyHeader.from_file(f)
print(phead.file_code == 0x1234)
print('min_x=', phead.min_x)
print('max_x=', phead.max_x)
print('min_y=', phead.min_y)
print('max_y=', phead.max_y)
print('num_polys=', phead.num_polys)
|
mit
|
stvstnfrd/edx-platform
|
openedx/core/lib/hash_utils.py
|
9
|
1136
|
"""
Utilities related to hashing
This duplicates functionality in django-oauth-provider, specifically the
long_token and short_token functions, which were used to create random tokens.
"""
import hashlib
from django.utils.encoding import force_bytes
from django.utils.crypto import get_random_string
from django.conf import settings
def create_hash256(max_length=None):
"""
Generate a hash that can be used as an application secret
    Warning: this is not sufficiently secure for tasks like encryption
Currently, this is just meant to create sufficiently random tokens
"""
hash_object = hashlib.sha256(force_bytes(get_random_string(32)))
hash_object.update(force_bytes(settings.SECRET_KEY))
output_hash = hash_object.hexdigest()
if max_length is not None and len(output_hash) > max_length:
return output_hash[:max_length]
return output_hash
def short_token():
"""
Generates a hash of length 32
    Warning: this is not sufficiently secure for tasks like encryption
Currently, this is just meant to create sufficiently random tokens
"""
return create_hash256(max_length=32)
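# Minimal usage sketch (assumption, not part of the original module): both
# helpers return hexadecimal strings, suitable as opaque identifiers where
# cryptographic strength is not required.
#
#   secret = create_hash256()   # 64-character SHA-256 hex digest
#   token = short_token()       # first 32 characters of such a digest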
|
agpl-3.0
|
rzhxeo/youtube-dl
|
youtube_dl/extractor/motorsport.py
|
129
|
1797
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
class MotorsportIE(InfoExtractor):
IE_DESC = 'motorsport.com'
_VALID_URL = r'http://www\.motorsport\.com/[^/?#]+/video/(?:[^/?#]+/)(?P<id>[^/]+)/?(?:$|[?#])'
_TEST = {
'url': 'http://www.motorsport.com/f1/video/main-gallery/red-bull-racing-2014-rules-explained/',
'info_dict': {
'id': '2-T3WuR-KMM',
'ext': 'mp4',
'title': 'Red Bull Racing: 2014 Rules Explained',
'duration': 208,
'description': 'A new clip from Red Bull sees Daniel Ricciardo and Sebastian Vettel explain the 2014 Formula One regulations – which are arguably the most complex the sport has ever seen.',
'uploader': 'mcomstaff',
'uploader_id': 'UC334JIYKkVnyFoNCclfZtHQ',
'upload_date': '20140903',
'thumbnail': r're:^https?://.+\.jpg$'
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
iframe_path = self._html_search_regex(
r'<iframe id="player_iframe"[^>]+src="([^"]+)"', webpage,
'iframe path')
iframe = self._download_webpage(
compat_urlparse.urljoin(url, iframe_path), display_id,
'Downloading iframe')
youtube_id = self._search_regex(
r'www.youtube.com/embed/(.{11})', iframe, 'youtube id')
return {
'_type': 'url_transparent',
'display_id': display_id,
'url': 'https://youtube.com/watch?v=%s' % youtube_id,
}
|
unlicense
|
movermeyer/cabot
|
cabot/cabotapp/migrations/0015_auto__add_field_alertacknowledgement_cancelled_time__add_field_alertac.py
|
5
|
16689
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AlertAcknowledgement.cancelled_time'
db.add_column(u'cabotapp_alertacknowledgement', 'cancelled_time',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
# Adding field 'AlertAcknowledgement.cancelled_user'
db.add_column(u'cabotapp_alertacknowledgement', 'cancelled_user',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='cancelleduser_set', null=True, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'AlertAcknowledgement.cancelled_time'
db.delete_column(u'cabotapp_alertacknowledgement', 'cancelled_time')
# Deleting field 'AlertAcknowledgement.cancelled_user'
db.delete_column(u'cabotapp_alertacknowledgement', 'cancelled_user_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cabotapp.alertacknowledgement': {
'Meta': {'object_name': 'AlertAcknowledgement'},
'cancelled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'cancelled_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'cancelleduser_set'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabotapp.Service']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cabotapp.alertplugin': {
'Meta': {'object_name': 'AlertPlugin'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cabotapp.alertplugin_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cabotapp.alertpluginuserdata': {
'Meta': {'unique_together': "(('title', 'user'),)", 'object_name': 'AlertPluginUserData'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cabotapp.alertpluginuserdata_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabotapp.UserProfile']"})
},
u'cabotapp.instance': {
'Meta': {'ordering': "['name']", 'object_name': 'Instance'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'alerts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'symmetrical': 'False', 'blank': 'True'}),
'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
u'cabotapp.instancestatussnapshot': {
'Meta': {'object_name': 'InstanceStatusSnapshot'},
'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': u"orm['cabotapp.Instance']"}),
'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
u'cabotapp.service': {
'Meta': {'ordering': "['name']", 'object_name': 'Service'},
'alerts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'symmetrical': 'False', 'blank': 'True'}),
'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instances': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.Instance']", 'symmetrical': 'False', 'blank': 'True'}),
'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
u'cabotapp.servicestatussnapshot': {
'Meta': {'object_name': 'ServiceStatusSnapshot'},
'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': u"orm['cabotapp.Service']"}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
u'cabotapp.shift': {
'Meta': {'object_name': 'Shift'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'uid': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cabotapp.statuscheck': {
'Meta': {'ordering': "['name']", 'object_name': 'StatusCheck'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allowed_num_failures': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'cached_health': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'calculated_status': ('django.db.models.fields.CharField', [], {'default': "'passing'", 'max_length': '50', 'blank': 'True'}),
'check_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'debounce': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'endpoint': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'expected_num_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'default': "'ERROR'", 'max_length': '30'}),
'last_run': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'max_queued_build_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'metric': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'password': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cabotapp.statuscheck_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status_code': ('django.db.models.fields.TextField', [], {'default': '200', 'null': 'True'}),
'text_match': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.IntegerField', [], {'default': '30', 'null': 'True'}),
'username': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'verify_ssl_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'cabotapp.statuscheckresult': {
'Meta': {'ordering': "['-time_complete']", 'object_name': 'StatusCheckResult'},
'check': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabotapp.StatusCheck']"}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'succeeded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'time_complete': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'})
},
u'cabotapp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'fallback_alert_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hipchat_alias': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cabotapp']
|
mit
|
jeremiahmarks/sl4a
|
python/src/Lib/bsddb/test/test_dbshelve.py
|
33
|
11290
|
"""
TestCases for checking dbShelve objects.
"""
import os, string
import random
import unittest
from test_all import db, dbshelve, test_support, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
# We want the objects to be comparable so we can test dbshelve.values
# later on.
class DataClass:
def __init__(self):
self.value = random.random()
def __repr__(self) : # For Python 3.0 comparison
return "DataClass %f" %self.value
def __cmp__(self, other): # For Python 2.x comparison
return cmp(self.value, other)
class DBShelveTestCase(unittest.TestCase):
def setUp(self):
import sys
if sys.version_info[0] >= 3 :
from test_all import do_proxy_db_py3k
self._flag_proxy_db_py3k = do_proxy_db_py3k(False)
self.filename = get_new_database_path()
self.do_open()
def tearDown(self):
import sys
if sys.version_info[0] >= 3 :
from test_all import do_proxy_db_py3k
do_proxy_db_py3k(self._flag_proxy_db_py3k)
self.do_close()
test_support.unlink(self.filename)
def mk(self, key):
"""Turn key into an appropriate key type for this db"""
# override in child class for RECNO
import sys
if sys.version_info[0] < 3 :
return key
else :
return bytes(key, "iso8859-1") # 8 bits
def populateDB(self, d):
for x in string.letters:
d[self.mk('S' + x)] = 10 * x # add a string
d[self.mk('I' + x)] = ord(x) # add an integer
d[self.mk('L' + x)] = [x] * 10 # add a list
inst = DataClass() # add an instance
inst.S = 10 * x
inst.I = ord(x)
inst.L = [x] * 10
d[self.mk('O' + x)] = inst
# overridable in derived classes to affect how the shelf is created/opened
def do_open(self):
self.d = dbshelve.open(self.filename)
# and closed...
def do_close(self):
self.d.close()
def test01_basics(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_basics..." % self.__class__.__name__
self.populateDB(self.d)
self.d.sync()
self.do_close()
self.do_open()
d = self.d
l = len(d)
k = d.keys()
s = d.stat()
f = d.fd()
if verbose:
print "length:", l
print "keys:", k
print "stats:", s
self.assertEqual(0, d.has_key(self.mk('bad key')))
self.assertEqual(1, d.has_key(self.mk('IA')))
self.assertEqual(1, d.has_key(self.mk('OA')))
d.delete(self.mk('IA'))
del d[self.mk('OA')]
self.assertEqual(0, d.has_key(self.mk('IA')))
self.assertEqual(0, d.has_key(self.mk('OA')))
self.assertEqual(len(d), l-2)
values = []
for key in d.keys():
value = d[key]
values.append(value)
if verbose:
print "%s: %s" % (key, value)
self.checkrec(key, value)
dbvalues = d.values()
self.assertEqual(len(dbvalues), len(d.keys()))
import sys
if sys.version_info[0] < 3 :
values.sort()
dbvalues.sort()
self.assertEqual(values, dbvalues)
else : # XXX: Convert all to strings. Please, improve
values.sort(key=lambda x : str(x))
dbvalues.sort(key=lambda x : str(x))
self.assertEqual(repr(values), repr(dbvalues))
items = d.items()
self.assertEqual(len(items), len(values))
for key, value in items:
self.checkrec(key, value)
self.assertEqual(d.get(self.mk('bad key')), None)
self.assertEqual(d.get(self.mk('bad key'), None), None)
self.assertEqual(d.get(self.mk('bad key'), 'a string'), 'a string')
self.assertEqual(d.get(self.mk('bad key'), [1, 2, 3]), [1, 2, 3])
d.set_get_returns_none(0)
self.assertRaises(db.DBNotFoundError, d.get, self.mk('bad key'))
d.set_get_returns_none(1)
d.put(self.mk('new key'), 'new data')
self.assertEqual(d.get(self.mk('new key')), 'new data')
self.assertEqual(d[self.mk('new key')], 'new data')
def test02_cursors(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_cursors..." % self.__class__.__name__
self.populateDB(self.d)
d = self.d
count = 0
c = d.cursor()
rec = c.first()
while rec is not None:
count = count + 1
if verbose:
print rec
key, value = rec
self.checkrec(key, value)
# Hack to avoid conversion by 2to3 tool
rec = getattr(c, "next")()
del c
self.assertEqual(count, len(d))
count = 0
c = d.cursor()
rec = c.last()
while rec is not None:
count = count + 1
if verbose:
print rec
key, value = rec
self.checkrec(key, value)
rec = c.prev()
self.assertEqual(count, len(d))
c.set(self.mk('SS'))
key, value = c.current()
self.checkrec(key, value)
del c
def test03_append(self):
# NOTE: this is overridden in RECNO subclass, don't change its name.
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_append..." % self.__class__.__name__
self.assertRaises(dbshelve.DBShelveError,
self.d.append, 'unit test was here')
def checkrec(self, key, value):
# override this in a subclass if the key type is different
import sys
if sys.version_info[0] >= 3 :
if isinstance(key, bytes) :
key = key.decode("iso8859-1") # 8 bits
x = key[1]
if key[0] == 'S':
self.assertEqual(type(value), str)
self.assertEqual(value, 10 * x)
elif key[0] == 'I':
self.assertEqual(type(value), int)
self.assertEqual(value, ord(x))
elif key[0] == 'L':
self.assertEqual(type(value), list)
self.assertEqual(value, [x] * 10)
elif key[0] == 'O':
import sys
if sys.version_info[0] < 3 :
from types import InstanceType
self.assertEqual(type(value), InstanceType)
else :
self.assertEqual(type(value), DataClass)
self.assertEqual(value.S, 10 * x)
self.assertEqual(value.I, ord(x))
self.assertEqual(value.L, [x] * 10)
else:
self.assert_(0, 'Unknown key type, fix the test')
#----------------------------------------------------------------------
class BasicShelveTestCase(DBShelveTestCase):
def do_open(self):
self.d = dbshelve.DBShelf()
self.d.open(self.filename, self.dbtype, self.dbflags)
def do_close(self):
self.d.close()
class BTreeShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE
class HashShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_HASH
dbflags = db.DB_CREATE
class ThreadBTreeShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE | db.DB_THREAD
class ThreadHashShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_HASH
dbflags = db.DB_CREATE | db.DB_THREAD
#----------------------------------------------------------------------
class BasicEnvShelveTestCase(DBShelveTestCase):
def do_open(self):
self.env = db.DBEnv()
self.env.open(self.homeDir,
self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)
self.filename = os.path.split(self.filename)[1]
self.d = dbshelve.DBShelf(self.env)
self.d.open(self.filename, self.dbtype, self.dbflags)
def do_close(self):
self.d.close()
self.env.close()
def setUp(self) :
self.homeDir = get_new_environment_path()
DBShelveTestCase.setUp(self)
def tearDown(self):
import sys
if sys.version_info[0] >= 3 :
from test_all import do_proxy_db_py3k
do_proxy_db_py3k(self._flag_proxy_db_py3k)
self.do_close()
test_support.rmtree(self.homeDir)
class EnvBTreeShelveTestCase(BasicEnvShelveTestCase):
envflags = 0
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE
class EnvHashShelveTestCase(BasicEnvShelveTestCase):
envflags = 0
dbtype = db.DB_HASH
dbflags = db.DB_CREATE
class EnvThreadBTreeShelveTestCase(BasicEnvShelveTestCase):
envflags = db.DB_THREAD
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE | db.DB_THREAD
class EnvThreadHashShelveTestCase(BasicEnvShelveTestCase):
envflags = db.DB_THREAD
dbtype = db.DB_HASH
dbflags = db.DB_CREATE | db.DB_THREAD
#----------------------------------------------------------------------
# test cases for a DBShelf in a RECNO DB.
class RecNoShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_RECNO
dbflags = db.DB_CREATE
def setUp(self):
BasicShelveTestCase.setUp(self)
# pool to assign integer key values out of
self.key_pool = list(range(1, 5000))
self.key_map = {} # map string keys to the number we gave them
self.intkey_map = {} # reverse map of above
def mk(self, key):
if key not in self.key_map:
self.key_map[key] = self.key_pool.pop(0)
self.intkey_map[self.key_map[key]] = key
return self.key_map[key]
def checkrec(self, intkey, value):
key = self.intkey_map[intkey]
BasicShelveTestCase.checkrec(self, key, value)
def test03_append(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_append..." % self.__class__.__name__
self.d[1] = 'spam'
self.d[5] = 'eggs'
self.assertEqual(6, self.d.append('spam'))
self.assertEqual(7, self.d.append('baked beans'))
self.assertEqual('spam', self.d.get(6))
self.assertEqual('spam', self.d.get(1))
self.assertEqual('baked beans', self.d.get(7))
self.assertEqual('eggs', self.d.get(5))
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBShelveTestCase))
suite.addTest(unittest.makeSuite(BTreeShelveTestCase))
suite.addTest(unittest.makeSuite(HashShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(RecNoShelveTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
apache-2.0
|
huxh10/iSDX
|
pctrl-sgdx/mds_lib.py
|
4
|
3123
|
def decompose_verbrokenheimerschnitzel(row_2_cols):
""" Input is a dictionary with matrix row IDs as keys and a list of each
row's contents (column indices of nonzero row entries) as a value.
BROKEN. DONUT USE.
"""
item_2_group = {}
item_buddies = {}
for group in row_2_cols.values():
for item in group:
# if we haven't seen this item before
if item not in item_2_group:
# initialize his mapping
item_2_group[item] = [group]
# and assume all his neighbors are buddies
item_buddies[item] = set(group)
# if we have seen him before
else:
# remove any fake buddies
item_buddies[item].intersection_update(group)
# update the mapping
item_2_group[item].append(group)
for item, buddies in item_buddies.iteritems():
item_buddies[item] = frozenset(buddies)
final_sets = {}
for row, group in row_2_cols.iteritems():
final_set = set([])
# add in all the corresponding prefix groups
for item in group:
final_set.add(item_buddies[item])
final_sets[row] = final_set
return final_sets
def decompose_sequential(col_2_rows):
""" Input is a dictionary with column IDs as keys and a list of each
column's contents (row indices of nonzero entries) as a value.
Input is a dictionary with prefixes as keys and lists of participants
as values. If a participant A has a route to p1, it will appear as
'sA' in the value list. If A is the default next-hop to p1, it will
also appear as 'bA' in the list.
Example: With participants [A,B,C] and prefixes [p1,p2,p3,p4],
an input could be {p1:[sA], p2:[sA,sB], p3:[sA,sB], p4:[sB,sC]}
Output is participant to prefix-group mapping.
Example: {sA:[[p1], [p2,p3]], sB:[[p2,p3], [p4]], sC:[[p4]]}
"""
# We will convert each column to a frozenset so that it can be hashed.
# By doing this, we can discover identical columns via hashing collisions.
# this dict recovers the original column IDs from each frozenset
froset_2_col = {}
# for each column
for col_id, col_contents in col_2_rows.iteritems():
# convert to a frozenset
froset = frozenset(col_contents)
        # if we haven't seen this exact column before
if froset not in froset_2_col:
# add a hash table entry
froset_2_col[froset] = []
# add it to the list of columns identical to it
froset_2_col[froset].append(col_id)
# The keys of froset_2_col are the unique columns we found.
# Unique columns correspond to prefix groups.
# For each participant in the column, add the associated prefix group
# to that participant's list of prefix groups
    # this will be the final output (i.e. participant_2_prefixGroups)
final_sets = {}
for froset, cols in froset_2_col.iteritems():
for row_id in froset:
if row_id not in final_sets:
final_sets[row_id] = []
final_sets[row_id].append(cols)
return final_sets
if __name__ == '__main__':
groups = {'a':[1,2,3], 'b':[2,3,4], 'c':[4]}
spuorg = {}
for key, values in groups.iteritems():
for item in values:
if item not in spuorg:
spuorg[item] = []
spuorg[item].append(key)
print groups
#print spuorg
print decompose_sequential(spuorg)
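    # Expected result for the example above (dict ordering may vary):
    #   {'a': [[1], [2, 3]], 'b': [[2, 3], [4]], 'c': [[4]]}
    # i.e. each row is mapped to the groups of columns with identical contents.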
|
apache-2.0
|
MiltosD/CEF-ELRC
|
lib/python2.7/site-packages/django/utils/crypto.py
|
245
|
1443
|
"""
Django's standard crypto functions and utilities.
"""
import hmac
from django.conf import settings
from django.utils.hashcompat import sha_constructor, sha_hmac
def salted_hmac(key_salt, value, secret=None):
"""
Returns the HMAC-SHA1 of 'value', using a key generated from key_salt and a
secret (which defaults to settings.SECRET_KEY).
A different key_salt should be passed in for every application of HMAC.
"""
if secret is None:
secret = settings.SECRET_KEY
# We need to generate a derived key from our base key. We can do this by
# passing the key_salt and our base key through a pseudo-random function and
# SHA1 works nicely.
key = sha_constructor(key_salt + secret).digest()
# If len(key_salt + secret) > sha_constructor().block_size, the above
# line is redundant and could be replaced by key = key_salt + secret, since
# the hmac module does the same thing for keys longer than the block size.
# However, we need to ensure that we *always* do this.
return hmac.new(key, msg=value, digestmod=sha_hmac)
def constant_time_compare(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
"""
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
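# Usage sketch (assumption, not part of Django's module): derive a MAC for a
# value and later verify a candidate in constant time.
#
#   mac = salted_hmac("myapp.token", value).hexdigest()
#   ok = constant_time_compare(mac, candidate_mac)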
|
bsd-3-clause
|
guettli/django
|
tests/utils_tests/test_timezone.py
|
13
|
8369
|
import datetime
import pickle
import pytz
from django.test import SimpleTestCase, mock, override_settings
from django.utils import timezone
CET = pytz.timezone("Europe/Paris")
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
class TimezoneTests(SimpleTestCase):
def test_now(self):
with override_settings(USE_TZ=True):
self.assertTrue(timezone.is_aware(timezone.now()))
with override_settings(USE_TZ=False):
self.assertTrue(timezone.is_naive(timezone.now()))
def test_localdate(self):
naive = datetime.datetime(2015, 1, 1, 0, 0, 1)
with self.assertRaisesMessage(ValueError, 'localtime() cannot be applied to a naive datetime'):
timezone.localdate(naive)
with self.assertRaisesMessage(ValueError, 'localtime() cannot be applied to a naive datetime'):
timezone.localdate(naive, timezone=EAT)
aware = datetime.datetime(2015, 1, 1, 0, 0, 1, tzinfo=ICT)
self.assertEqual(timezone.localdate(aware, timezone=EAT), datetime.date(2014, 12, 31))
with timezone.override(EAT):
self.assertEqual(timezone.localdate(aware), datetime.date(2014, 12, 31))
with mock.patch('django.utils.timezone.now', return_value=aware):
self.assertEqual(timezone.localdate(timezone=EAT), datetime.date(2014, 12, 31))
with timezone.override(EAT):
self.assertEqual(timezone.localdate(), datetime.date(2014, 12, 31))
def test_override(self):
default = timezone.get_default_timezone()
try:
timezone.activate(ICT)
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
timezone.deactivate()
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
finally:
timezone.deactivate()
def test_override_decorator(self):
default = timezone.get_default_timezone()
@timezone.override(EAT)
def func_tz_eat():
self.assertIs(EAT, timezone.get_current_timezone())
@timezone.override(None)
def func_tz_none():
self.assertIs(default, timezone.get_current_timezone())
try:
timezone.activate(ICT)
func_tz_eat()
self.assertIs(ICT, timezone.get_current_timezone())
func_tz_none()
self.assertIs(ICT, timezone.get_current_timezone())
timezone.deactivate()
func_tz_eat()
self.assertIs(default, timezone.get_current_timezone())
func_tz_none()
self.assertIs(default, timezone.get_current_timezone())
finally:
timezone.deactivate()
def test_override_string_tz(self):
with timezone.override('Asia/Bangkok'):
self.assertEqual(timezone.get_current_timezone_name(), 'Asia/Bangkok')
def test_override_fixed_offset(self):
with timezone.override(timezone.FixedOffset(0, 'tzname')):
self.assertEqual(timezone.get_current_timezone_name(), 'tzname')
def test_activate_invalid_timezone(self):
with self.assertRaisesMessage(ValueError, 'Invalid timezone: None'):
timezone.activate(None)
def test_is_aware(self):
self.assertTrue(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
self.assertFalse(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30)))
def test_is_naive(self):
self.assertFalse(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
self.assertTrue(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30)))
def test_make_aware(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
with self.assertRaises(ValueError):
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT)
def test_make_naive(self):
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30))
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30))
with self.assertRaisesMessage(ValueError, 'make_naive() cannot be applied to a naive datetime'):
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT)
def test_make_naive_no_tz(self):
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)),
datetime.datetime(2011, 9, 1, 5, 20, 30)
)
def test_make_aware_no_tz(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30)),
datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=timezone.get_fixed_timezone(-300))
)
def test_make_aware2(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 12, 20, 30), CET),
CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)))
with self.assertRaises(ValueError):
timezone.make_aware(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET)
def test_make_aware_pytz(self):
self.assertEqual(
timezone.make_naive(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET),
datetime.datetime(2011, 9, 1, 12, 20, 30))
self.assertEqual(
timezone.make_naive(
pytz.timezone("Asia/Bangkok").localize(datetime.datetime(2011, 9, 1, 17, 20, 30)), CET
),
datetime.datetime(2011, 9, 1, 12, 20, 30))
with self.assertRaisesMessage(ValueError, 'make_naive() cannot be applied to a naive datetime'):
timezone.make_naive(datetime.datetime(2011, 9, 1, 12, 20, 30), CET)
def test_make_aware_pytz_ambiguous(self):
# 2:30 happens twice, once before DST ends and once after
ambiguous = datetime.datetime(2015, 10, 25, 2, 30)
with self.assertRaises(pytz.AmbiguousTimeError):
timezone.make_aware(ambiguous, timezone=CET)
std = timezone.make_aware(ambiguous, timezone=CET, is_dst=False)
dst = timezone.make_aware(ambiguous, timezone=CET, is_dst=True)
self.assertEqual(std - dst, datetime.timedelta(hours=1))
self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1))
self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2))
def test_make_aware_pytz_non_existent(self):
# 2:30 never happened due to DST
non_existent = datetime.datetime(2015, 3, 29, 2, 30)
with self.assertRaises(pytz.NonExistentTimeError):
timezone.make_aware(non_existent, timezone=CET)
std = timezone.make_aware(non_existent, timezone=CET, is_dst=False)
dst = timezone.make_aware(non_existent, timezone=CET, is_dst=True)
self.assertEqual(std - dst, datetime.timedelta(hours=1))
self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1))
self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2))
def test_get_default_timezone(self):
self.assertEqual(timezone.get_default_timezone_name(), 'America/Chicago')
def test_fixedoffset_timedelta(self):
delta = datetime.timedelta(hours=1)
self.assertEqual(timezone.get_fixed_timezone(delta).utcoffset(''), delta)
def test_fixedoffset_pickle(self):
self.assertEqual(pickle.loads(pickle.dumps(timezone.FixedOffset(0, 'tzname'))).tzname(''), 'tzname')
|
bsd-3-clause
|
atopuzov/nitro-python
|
nssrc/com/citrix/netscaler/nitro/resource/config/cr/crvserver_lbvserver_binding.py
|
3
|
6480
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class crvserver_lbvserver_binding(base_resource) :
""" Binding class showing the lbvserver that can be bound to crvserver.
"""
def __init__(self) :
self._lbvserver = ""
self._hits = 0
self._name = ""
self.___count = 0
@property
def lbvserver(self) :
ur"""The Default target server name.<br/>Minimum length = 1.
"""
try :
return self._lbvserver
except Exception as e:
raise e
@lbvserver.setter
def lbvserver(self, lbvserver) :
ur"""The Default target server name.<br/>Minimum length = 1
"""
try :
self._lbvserver = lbvserver
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def hits(self) :
ur"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(crvserver_lbvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.crvserver_lbvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = crvserver_lbvserver_binding()
updateresource.name = resource.name
updateresource.lbvserver = resource.lbvserver
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [crvserver_lbvserver_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].lbvserver = resource[i].lbvserver
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = crvserver_lbvserver_binding()
deleteresource.name = resource.name
deleteresource.lbvserver = resource.lbvserver
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [crvserver_lbvserver_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].lbvserver = resource[i].lbvserver
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch crvserver_lbvserver_binding resources.
"""
try :
obj = crvserver_lbvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of crvserver_lbvserver_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = crvserver_lbvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
		ur""" Use this API to count crvserver_lbvserver_binding resources configured on NetScaler.
"""
try :
obj = crvserver_lbvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of crvserver_lbvserver_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = crvserver_lbvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class crvserver_lbvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.crvserver_lbvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.crvserver_lbvserver_binding = [crvserver_lbvserver_binding() for _ in range(length)]
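# Illustrative usage sketch (added; not part of the original SDK file). The
# NetScaler address and credentials below are hypothetical placeholders:
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "nsroot")
#   bindings = crvserver_lbvserver_binding.get(client, "cr_vsrv_example")
#   total = crvserver_lbvserver_binding.count(client, "cr_vsrv_example")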
|
apache-2.0
|
zploskey/servo
|
tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/extensions.py
|
489
|
31780
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket.http_header_util import quote_if_necessary
# The list of available server side extension processor classes.
_available_processors = {}
_compression_extension_names = []
class ExtensionProcessorInterface(object):
def __init__(self, request):
self._logger = util.get_class_logger(self)
self._request = request
self._active = True
def request(self):
return self._request
def name(self):
return None
def check_consistency_with_other_processors(self, processors):
pass
def set_active(self, active):
self._active = active
def is_active(self):
return self._active
def _get_extension_response_internal(self):
return None
def get_extension_response(self):
if not self._active:
self._logger.debug('Extension %s is deactivated', self.name())
return None
response = self._get_extension_response_internal()
if response is None:
self._active = False
return response
def _setup_stream_options_internal(self, stream_options):
pass
def setup_stream_options(self, stream_options):
if self._active:
self._setup_stream_options_internal(stream_options)
def _log_outgoing_compression_ratio(
logger, original_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
if original_bytes != 0:
ratio = float(filtered_bytes) / original_bytes
logger.debug('Outgoing compression ratio: %f (average: %f)' %
(ratio, average_ratio))
def _log_incoming_compression_ratio(
logger, received_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
if filtered_bytes != 0:
ratio = float(received_bytes) / filtered_bytes
logger.debug('Incoming compression ratio: %f (average: %f)' %
(ratio, average_ratio))
def _parse_window_bits(bits):
"""Return parsed integer value iff the given string conforms to the
grammar of the window bits extension parameters.
"""
if bits is None:
raise ValueError('Value is required')
# For non integer values such as "10.0", ValueError will be raised.
int_bits = int(bits)
# First condition is to drop leading zero case e.g. "08".
if bits != str(int_bits) or int_bits < 8 or int_bits > 15:
raise ValueError('Invalid value: %r' % bits)
return int_bits
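# Illustrative examples (added sketch, not in the original module):
#   _parse_window_bits('10')  -> 10
#   _parse_window_bits('08')  -> ValueError (leading zero is rejected)
#   _parse_window_bits('16')  -> ValueError (outside the 8..15 range)
#   _parse_window_bits(None)  -> ValueError (a value is required)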
class _AverageRatioCalculator(object):
"""Stores total bytes of original and result data, and calculates average
result / original ratio.
"""
def __init__(self):
self._total_original_bytes = 0
self._total_result_bytes = 0
def add_original_bytes(self, value):
self._total_original_bytes += value
def add_result_bytes(self, value):
self._total_result_bytes += value
def get_average_ratio(self):
if self._total_original_bytes != 0:
return (float(self._total_result_bytes) /
self._total_original_bytes)
else:
return float('inf')
class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
"""deflate-frame extension processor.
Specification:
http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
"""
_WINDOW_BITS_PARAM = 'max_window_bits'
_NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
def __init__(self, request):
ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._response_window_bits = None
self._response_no_context_takeover = False
self._bfinal = False
# Calculates
# (Total outgoing bytes supplied to this filter) /
# (Total bytes sent to the network after applying this filter)
self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
# Calculates
# (Total bytes received from the network) /
# (Total incoming bytes obtained after applying this filter)
self._incoming_average_ratio_calculator = _AverageRatioCalculator()
def name(self):
return common.DEFLATE_FRAME_EXTENSION
def _get_extension_response_internal(self):
# Any unknown parameter will be just ignored.
window_bits = None
if self._request.has_parameter(self._WINDOW_BITS_PARAM):
window_bits = self._request.get_parameter_value(
self._WINDOW_BITS_PARAM)
try:
window_bits = _parse_window_bits(window_bits)
except ValueError, e:
return None
no_context_takeover = self._request.has_parameter(
self._NO_CONTEXT_TAKEOVER_PARAM)
if (no_context_takeover and
self._request.get_parameter_value(
self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
return None
self._rfc1979_deflater = util._RFC1979Deflater(
window_bits, no_context_takeover)
self._rfc1979_inflater = util._RFC1979Inflater()
self._compress_outgoing = True
response = common.ExtensionParameter(self._request.name())
if self._response_window_bits is not None:
response.add_parameter(
self._WINDOW_BITS_PARAM, str(self._response_window_bits))
if self._response_no_context_takeover:
response.add_parameter(
self._NO_CONTEXT_TAKEOVER_PARAM, None)
self._logger.debug(
'Enable %s extension ('
'request: window_bits=%s; no_context_takeover=%r, '
'response: window_wbits=%s; no_context_takeover=%r)' %
(self._request.name(),
window_bits,
no_context_takeover,
self._response_window_bits,
self._response_no_context_takeover))
return response
def _setup_stream_options_internal(self, stream_options):
class _OutgoingFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._outgoing_filter(frame)
class _IncomingFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._incoming_filter(frame)
stream_options.outgoing_frame_filters.append(
_OutgoingFilter(self))
stream_options.incoming_frame_filters.insert(
0, _IncomingFilter(self))
def set_response_window_bits(self, value):
self._response_window_bits = value
def set_response_no_context_takeover(self, value):
self._response_no_context_takeover = value
def set_bfinal(self, value):
self._bfinal = value
def enable_outgoing_compression(self):
self._compress_outgoing = True
def disable_outgoing_compression(self):
self._compress_outgoing = False
def _outgoing_filter(self, frame):
"""Transform outgoing frames. This method is called only by
an _OutgoingFilter instance.
"""
original_payload_size = len(frame.payload)
self._outgoing_average_ratio_calculator.add_original_bytes(
original_payload_size)
if (not self._compress_outgoing or
common.is_control_opcode(frame.opcode)):
self._outgoing_average_ratio_calculator.add_result_bytes(
original_payload_size)
return
frame.payload = self._rfc1979_deflater.filter(
frame.payload, bfinal=self._bfinal)
frame.rsv1 = 1
filtered_payload_size = len(frame.payload)
self._outgoing_average_ratio_calculator.add_result_bytes(
filtered_payload_size)
_log_outgoing_compression_ratio(
self._logger,
original_payload_size,
filtered_payload_size,
self._outgoing_average_ratio_calculator.get_average_ratio())
def _incoming_filter(self, frame):
"""Transform incoming frames. This method is called only by
an _IncomingFilter instance.
"""
received_payload_size = len(frame.payload)
self._incoming_average_ratio_calculator.add_result_bytes(
received_payload_size)
if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
self._incoming_average_ratio_calculator.add_original_bytes(
received_payload_size)
return
frame.payload = self._rfc1979_inflater.filter(frame.payload)
frame.rsv1 = 0
filtered_payload_size = len(frame.payload)
self._incoming_average_ratio_calculator.add_original_bytes(
filtered_payload_size)
_log_incoming_compression_ratio(
self._logger,
received_payload_size,
filtered_payload_size,
self._incoming_average_ratio_calculator.get_average_ratio())
_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
_compression_extension_names.append(common.DEFLATE_FRAME_EXTENSION)
_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
_compression_extension_names.append(common.X_WEBKIT_DEFLATE_FRAME_EXTENSION)
def _parse_compression_method(data):
"""Parses the value of "method" extension parameter."""
return common.parse_extensions(data)
def _create_accepted_method_desc(method_name, method_params):
"""Creates accepted-method-desc from given method name and parameters"""
extension = common.ExtensionParameter(method_name)
for name, value in method_params:
extension.add_parameter(name, value)
return common.format_extension(extension)
class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
"""Base class for perframe-compress and permessage-compress extension."""
_METHOD_PARAM = 'method'
def __init__(self, request):
ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._compression_method_name = None
self._compression_processor = None
self._compression_processor_hook = None
def name(self):
return ''
def _lookup_compression_processor(self, method_desc):
return None
def _get_compression_processor_response(self):
"""Looks up the compression processor based on the self._request and
returns the compression processor's response.
"""
method_list = self._request.get_parameter_value(self._METHOD_PARAM)
if method_list is None:
return None
methods = _parse_compression_method(method_list)
if methods is None:
return None
        compression_processor = None
# The current implementation tries only the first method that matches
# supported algorithm. Following methods aren't tried even if the
# first one is rejected.
# TODO(bashi): Need to clarify this behavior.
for method_desc in methods:
compression_processor = self._lookup_compression_processor(
method_desc)
if compression_processor is not None:
self._compression_method_name = method_desc.name()
break
if compression_processor is None:
return None
if self._compression_processor_hook:
self._compression_processor_hook(compression_processor)
processor_response = compression_processor.get_extension_response()
if processor_response is None:
return None
self._compression_processor = compression_processor
return processor_response
def _get_extension_response_internal(self):
processor_response = self._get_compression_processor_response()
if processor_response is None:
return None
response = common.ExtensionParameter(self._request.name())
accepted_method_desc = _create_accepted_method_desc(
self._compression_method_name,
processor_response.get_parameters())
response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
self._logger.debug(
'Enable %s extension (method: %s)' %
(self._request.name(), self._compression_method_name))
return response
def _setup_stream_options_internal(self, stream_options):
if self._compression_processor is None:
return
self._compression_processor.setup_stream_options(stream_options)
def set_compression_processor_hook(self, hook):
self._compression_processor_hook = hook
def get_compression_processor(self):
return self._compression_processor
class PerMessageDeflateExtensionProcessor(ExtensionProcessorInterface):
"""permessage-deflate extension processor. It's also used for
permessage-compress extension when the deflate method is chosen.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-08
"""
_SERVER_MAX_WINDOW_BITS_PARAM = 'server_max_window_bits'
_SERVER_NO_CONTEXT_TAKEOVER_PARAM = 'server_no_context_takeover'
_CLIENT_MAX_WINDOW_BITS_PARAM = 'client_max_window_bits'
_CLIENT_NO_CONTEXT_TAKEOVER_PARAM = 'client_no_context_takeover'
def __init__(self, request, draft08=True):
"""Construct PerMessageDeflateExtensionProcessor
Args:
draft08: Follow the constraints on the parameters that were not
specified for permessage-compress but are specified for
permessage-deflate as on
draft-ietf-hybi-permessage-compression-08.
"""
ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._preferred_client_max_window_bits = None
self._client_no_context_takeover = False
self._draft08 = draft08
def name(self):
return 'deflate'
def _get_extension_response_internal(self):
if self._draft08:
for name in self._request.get_parameter_names():
if name not in [self._SERVER_MAX_WINDOW_BITS_PARAM,
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
self._CLIENT_MAX_WINDOW_BITS_PARAM]:
self._logger.debug('Unknown parameter: %r', name)
return None
else:
# Any unknown parameter will be just ignored.
pass
server_max_window_bits = None
if self._request.has_parameter(self._SERVER_MAX_WINDOW_BITS_PARAM):
server_max_window_bits = self._request.get_parameter_value(
self._SERVER_MAX_WINDOW_BITS_PARAM)
try:
server_max_window_bits = _parse_window_bits(
server_max_window_bits)
except ValueError, e:
self._logger.debug('Bad %s parameter: %r',
self._SERVER_MAX_WINDOW_BITS_PARAM,
e)
return None
server_no_context_takeover = self._request.has_parameter(
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM)
if (server_no_context_takeover and
self._request.get_parameter_value(
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM) is not None):
self._logger.debug('%s parameter must not have a value: %r',
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
server_no_context_takeover)
return None
# client_max_window_bits from a client indicates whether the client can
# accept client_max_window_bits from a server or not.
client_client_max_window_bits = self._request.has_parameter(
self._CLIENT_MAX_WINDOW_BITS_PARAM)
if (self._draft08 and
client_client_max_window_bits and
self._request.get_parameter_value(
self._CLIENT_MAX_WINDOW_BITS_PARAM) is not None):
self._logger.debug('%s parameter must not have a value in a '
'client\'s opening handshake: %r',
self._CLIENT_MAX_WINDOW_BITS_PARAM,
client_client_max_window_bits)
return None
self._rfc1979_deflater = util._RFC1979Deflater(
server_max_window_bits, server_no_context_takeover)
# Note that we prepare for incoming messages compressed with window
        # bits up to 15 regardless of the client_max_window_bits value to be
# sent to the client.
self._rfc1979_inflater = util._RFC1979Inflater()
self._framer = _PerMessageDeflateFramer(
server_max_window_bits, server_no_context_takeover)
self._framer.set_bfinal(False)
self._framer.set_compress_outgoing_enabled(True)
response = common.ExtensionParameter(self._request.name())
if server_max_window_bits is not None:
response.add_parameter(
self._SERVER_MAX_WINDOW_BITS_PARAM,
str(server_max_window_bits))
if server_no_context_takeover:
response.add_parameter(
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM, None)
if self._preferred_client_max_window_bits is not None:
if self._draft08 and not client_client_max_window_bits:
self._logger.debug('Processor is configured to use %s but '
'the client cannot accept it',
self._CLIENT_MAX_WINDOW_BITS_PARAM)
return None
response.add_parameter(
self._CLIENT_MAX_WINDOW_BITS_PARAM,
str(self._preferred_client_max_window_bits))
if self._client_no_context_takeover:
response.add_parameter(
self._CLIENT_NO_CONTEXT_TAKEOVER_PARAM, None)
self._logger.debug(
'Enable %s extension ('
'request: server_max_window_bits=%s; '
'server_no_context_takeover=%r, '
'response: client_max_window_bits=%s; '
'client_no_context_takeover=%r)' %
(self._request.name(),
server_max_window_bits,
server_no_context_takeover,
self._preferred_client_max_window_bits,
self._client_no_context_takeover))
return response
def _setup_stream_options_internal(self, stream_options):
self._framer.setup_stream_options(stream_options)
def set_client_max_window_bits(self, value):
"""If this option is specified, this class adds the
client_max_window_bits extension parameter to the handshake response,
but doesn't reduce the LZ77 sliding window size of its inflater.
I.e., you can use this for testing client implementation but cannot
reduce memory usage of this class.
If this method has been called with True and an offer without the
client_max_window_bits extension parameter is received,
- (When processing the permessage-deflate extension) this processor
declines the request.
- (When processing the permessage-compress extension) this processor
accepts the request.
"""
self._preferred_client_max_window_bits = value
def set_client_no_context_takeover(self, value):
"""If this option is specified, this class adds the
client_no_context_takeover extension parameter to the handshake
response, but doesn't reset inflater for each message. I.e., you can
use this for testing client implementation but cannot reduce memory
usage of this class.
"""
self._client_no_context_takeover = value
def set_bfinal(self, value):
self._framer.set_bfinal(value)
def enable_outgoing_compression(self):
self._framer.set_compress_outgoing_enabled(True)
def disable_outgoing_compression(self):
self._framer.set_compress_outgoing_enabled(False)
class _PerMessageDeflateFramer(object):
"""A framer for extensions with per-message DEFLATE feature."""
def __init__(self, deflate_max_window_bits, deflate_no_context_takeover):
self._logger = util.get_class_logger(self)
self._rfc1979_deflater = util._RFC1979Deflater(
deflate_max_window_bits, deflate_no_context_takeover)
self._rfc1979_inflater = util._RFC1979Inflater()
self._bfinal = False
self._compress_outgoing_enabled = False
# True if a message is fragmented and compression is ongoing.
self._compress_ongoing = False
# Calculates
# (Total outgoing bytes supplied to this filter) /
# (Total bytes sent to the network after applying this filter)
self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
# Calculates
# (Total bytes received from the network) /
# (Total incoming bytes obtained after applying this filter)
self._incoming_average_ratio_calculator = _AverageRatioCalculator()
def set_bfinal(self, value):
self._bfinal = value
def set_compress_outgoing_enabled(self, value):
self._compress_outgoing_enabled = value
def _process_incoming_message(self, message, decompress):
if not decompress:
return message
received_payload_size = len(message)
self._incoming_average_ratio_calculator.add_result_bytes(
received_payload_size)
message = self._rfc1979_inflater.filter(message)
filtered_payload_size = len(message)
self._incoming_average_ratio_calculator.add_original_bytes(
filtered_payload_size)
_log_incoming_compression_ratio(
self._logger,
received_payload_size,
filtered_payload_size,
self._incoming_average_ratio_calculator.get_average_ratio())
return message
def _process_outgoing_message(self, message, end, binary):
if not binary:
message = message.encode('utf-8')
if not self._compress_outgoing_enabled:
return message
original_payload_size = len(message)
self._outgoing_average_ratio_calculator.add_original_bytes(
original_payload_size)
message = self._rfc1979_deflater.filter(
message, end=end, bfinal=self._bfinal)
filtered_payload_size = len(message)
self._outgoing_average_ratio_calculator.add_result_bytes(
filtered_payload_size)
_log_outgoing_compression_ratio(
self._logger,
original_payload_size,
filtered_payload_size,
self._outgoing_average_ratio_calculator.get_average_ratio())
if not self._compress_ongoing:
self._outgoing_frame_filter.set_compression_bit()
self._compress_ongoing = not end
return message
def _process_incoming_frame(self, frame):
if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
self._incoming_message_filter.decompress_next_message()
frame.rsv1 = 0
def _process_outgoing_frame(self, frame, compression_bit):
if (not compression_bit or
common.is_control_opcode(frame.opcode)):
return
frame.rsv1 = 1
def setup_stream_options(self, stream_options):
"""Creates filters and sets them to the StreamOptions."""
class _OutgoingMessageFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, message, end=True, binary=False):
return self._parent._process_outgoing_message(
message, end, binary)
class _IncomingMessageFilter(object):
def __init__(self, parent):
self._parent = parent
self._decompress_next_message = False
def decompress_next_message(self):
self._decompress_next_message = True
def filter(self, message):
message = self._parent._process_incoming_message(
message, self._decompress_next_message)
self._decompress_next_message = False
return message
self._outgoing_message_filter = _OutgoingMessageFilter(self)
self._incoming_message_filter = _IncomingMessageFilter(self)
stream_options.outgoing_message_filters.append(
self._outgoing_message_filter)
stream_options.incoming_message_filters.append(
self._incoming_message_filter)
class _OutgoingFrameFilter(object):
def __init__(self, parent):
self._parent = parent
self._set_compression_bit = False
def set_compression_bit(self):
self._set_compression_bit = True
def filter(self, frame):
self._parent._process_outgoing_frame(
frame, self._set_compression_bit)
self._set_compression_bit = False
class _IncomingFrameFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._process_incoming_frame(frame)
self._outgoing_frame_filter = _OutgoingFrameFilter(self)
self._incoming_frame_filter = _IncomingFrameFilter(self)
stream_options.outgoing_frame_filters.append(
self._outgoing_frame_filter)
stream_options.incoming_frame_filters.append(
self._incoming_frame_filter)
stream_options.encode_text_message_to_utf8 = False
_available_processors[common.PERMESSAGE_DEFLATE_EXTENSION] = (
PerMessageDeflateExtensionProcessor)
# TODO(tyoshino): Reorganize class names.
_compression_extension_names.append('deflate')
class PerMessageCompressExtensionProcessor(
CompressionExtensionProcessorBase):
"""permessage-compress extension processor.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
"""
_DEFLATE_METHOD = 'deflate'
def __init__(self, request):
CompressionExtensionProcessorBase.__init__(self, request)
def name(self):
return common.PERMESSAGE_COMPRESSION_EXTENSION
def _lookup_compression_processor(self, method_desc):
if method_desc.name() == self._DEFLATE_METHOD:
return PerMessageDeflateExtensionProcessor(method_desc, False)
return None
_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
PerMessageCompressExtensionProcessor)
_compression_extension_names.append(common.PERMESSAGE_COMPRESSION_EXTENSION)
class MuxExtensionProcessor(ExtensionProcessorInterface):
"""WebSocket multiplexing extension processor."""
_QUOTA_PARAM = 'quota'
def __init__(self, request):
ExtensionProcessorInterface.__init__(self, request)
self._quota = 0
self._extensions = []
def name(self):
return common.MUX_EXTENSION
def check_consistency_with_other_processors(self, processors):
before_mux = True
for processor in processors:
name = processor.name()
if name == self.name():
before_mux = False
continue
if not processor.is_active():
continue
if before_mux:
# Mux extension cannot be used after extensions
# that depend on frame boundary, extension data field, or any
# reserved bits which are attributed to each frame.
if (name == common.DEFLATE_FRAME_EXTENSION or
name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
self.set_active(False)
return
else:
# Mux extension should not be applied before any history-based
# compression extension.
if (name == common.DEFLATE_FRAME_EXTENSION or
name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION or
name == common.PERMESSAGE_COMPRESSION_EXTENSION or
name == common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION):
self.set_active(False)
return
def _get_extension_response_internal(self):
self._active = False
quota = self._request.get_parameter_value(self._QUOTA_PARAM)
if quota is not None:
try:
quota = int(quota)
except ValueError, e:
return None
if quota < 0 or quota >= 2 ** 32:
return None
self._quota = quota
self._active = True
return common.ExtensionParameter(common.MUX_EXTENSION)
def _setup_stream_options_internal(self, stream_options):
pass
def set_quota(self, quota):
self._quota = quota
def quota(self):
return self._quota
def set_extensions(self, extensions):
self._extensions = extensions
def extensions(self):
return self._extensions
_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
def get_extension_processor(extension_request):
"""Given an ExtensionParameter representing an extension offer received
from a client, configures and returns an instance of the corresponding
extension processor class.
"""
processor_class = _available_processors.get(extension_request.name())
if processor_class is None:
return None
return processor_class(extension_request)
def is_compression_extension(extension_name):
return extension_name in _compression_extension_names
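# Illustrative handshake-side sketch (added; not part of the original module).
# Here `offer` stands for a parsed common.ExtensionParameter taken from the
# client's Sec-WebSocket-Extensions offer and `stream_options` for the
# connection's StreamOptions:
#
#   processor = get_extension_processor(offer)
#   if processor is not None:
#       response = processor.get_extension_response()
#       if response is not None:
#           processor.setup_stream_options(stream_options)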
# vi:sts=4 sw=4 et
|
mpl-2.0
|
Nick-OpusVL/odoo
|
addons/association/__openerp__.py
|
260
|
1700
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Associations Management',
'version': '0.1',
'category': 'Specific Industry Applications',
'description': """
This module is to configure modules related to an association.
==============================================================
It installs the profile for associations to manage events, registrations, memberships,
membership products (schemes).
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'membership', 'event'],
'data': ['security/ir.model.access.csv', 'profile_association.xml'],
'demo': [],
'installable': True,
'auto_install': False,
'website': 'https://www.odoo.com'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
gangadhar-kadam/sapphire_app
|
hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py
|
2
|
2974
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr, cint
from webnotes import msgprint, _
def execute(filters=None):
if not filters: filters = {}
conditions, filters = get_conditions(filters)
columns = get_columns(filters)
att_map = get_attendance_list(conditions, filters)
emp_map = get_employee_details()
data = []
for emp in sorted(att_map):
emp_det = emp_map.get(emp)
row = [emp, emp_det.employee_name, emp_det.branch, emp_det.department, emp_det.designation,
emp_det.company]
total_p = total_a = 0.0
for day in range(filters["total_days_in_month"]):
status = att_map.get(emp).get(day + 1, "Absent")
status_map = {"Present": "P", "Absent": "A", "Half Day": "HD"}
row.append(status_map[status])
if status == "Present":
total_p += 1
elif status == "Absent":
total_a += 1
elif status == "Half Day":
total_p += 0.5
total_a += 0.5
row += [total_p, total_a]
data.append(row)
return columns, data
def get_columns(filters):
columns = [
"Employee:Link/Employee:120", "Employee Name::140", "Branch:Link/Branch:120",
"Department:Link/Department:120", "Designation:Link/Designation:120",
"Company:Link/Company:120"
]
for day in range(filters["total_days_in_month"]):
columns.append(cstr(day+1) +"::20")
columns += ["Total Present:Float:80", "Total Absent:Float:80"]
return columns
def get_attendance_list(conditions, filters):
attendance_list = webnotes.conn.sql("""select employee, day(att_date) as day_of_month,
status from tabAttendance where docstatus = 1 %s order by employee, att_date""" %
conditions, filters, as_dict=1)
att_map = {}
for d in attendance_list:
att_map.setdefault(d.employee, webnotes._dict()).setdefault(d.day_of_month, "")
att_map[d.employee][d.day_of_month] = d.status
return att_map
def get_conditions(filters):
if not (filters.get("month") and filters.get("fiscal_year")):
msgprint(_("Please select month and year"), raise_exception=1)
filters["month"] = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
from calendar import monthrange
filters["total_days_in_month"] = monthrange(cint(filters["fiscal_year"].split("-")[-1]),
filters["month"])[1]
conditions = " and month(att_date) = %(month)s and fiscal_year = %(fiscal_year)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
return conditions, filters
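# Illustrative note (added; hypothetical filter values, not from the original):
# with filters = {"month": "Feb", "fiscal_year": "2012-2013"}, get_conditions()
# rewrites filters["month"] to 2 and derives
# filters["total_days_in_month"] = monthrange(2013, 2)[1] = 28 before building
# the month/fiscal_year SQL conditions.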
def get_employee_details():
employee = webnotes.conn.sql("""select name, employee_name, designation, department,
branch, company from tabEmployee where docstatus < 2 and status = 'Active'""", as_dict=1)
emp_map = {}
for emp in employee:
emp_map[emp.name] = emp
return emp_map
|
agpl-3.0
|
nattayamairittha/MeanStack
|
MyProject/node_modules/node-sass-middleware/node_modules/node-sass/node_modules/node-gyp/gyp/tools/pretty_gyp.py
|
2618
|
4756
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3))
def mask_comments(input):
  """Mask the comments so we skip braces inside comments."""
search_re = re.compile(r'(.*?)(#)(.*)')
return [search_re.sub(comment_replace, line) for line in input]
def quote_replace(matchobj):
return "%s%s%s%s" % (matchobj.group(1),
matchobj.group(2),
'x'*len(matchobj.group(3)),
matchobj.group(2))
def mask_quotes(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
return [search_re.sub(quote_replace, line) for line in input]
def do_split(input, masked_input, search_re):
output = []
mask_output = []
for (line, masked_line) in zip(input, masked_input):
m = search_re.match(masked_line)
while m:
split = len(m.group(1))
line = line[:split] + r'\n' + line[split:]
masked_line = masked_line[:split] + r'\n' + masked_line[split:]
m = search_re.match(masked_line)
output.extend(line.split(r'\n'))
mask_output.extend(masked_line.split(r'\n'))
return (output, mask_output)
def split_double_braces(input):
"""Masks out the quotes and comments, and then splits appropriate
     lines (lines that match the double_*_brace re's above) before
indenting them below.
These are used to split lines which have multiple braces on them, so
that the indentation looks prettier when all laid out (e.g. closing
braces make a nice diagonal line).
"""
double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')
masked_input = mask_quotes(input)
masked_input = mask_comments(masked_input)
(output, mask_output) = do_split(input, masked_input, double_open_brace_re)
(output, mask_output) = do_split(output, mask_output, double_close_brace_re)
return output
def count_braces(line):
  """Keeps track of the number of braces on a given line and returns the result.
It starts at zero and subtracts for closed braces, and adds for open braces.
"""
open_braces = ['[', '(', '{']
close_braces = [']', ')', '}']
closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
cnt = 0
stripline = COMMENT_RE.sub(r'', line)
stripline = QUOTE_RE.sub(r"''", stripline)
for char in stripline:
for brace in open_braces:
if char == brace:
cnt += 1
for brace in close_braces:
if char == brace:
cnt -= 1
after = False
if cnt > 0:
after = True
# This catches the special case of a closing brace having something
# other than just whitespace ahead of it -- we don't want to
# unindent that until after this line is printed so it stays with
# the previous indentation level.
if cnt < 0 and closing_prefix_re.match(stripline):
after = True
return (cnt, after)
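# Illustrative example (added sketch, not in the original): for the line
#   "'targets': [{"
# count_braces() returns (2, True): two net opening braces, and `after` is True
# so prettyprint_input() increases the indent only after printing that line.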
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
if len(sys.argv) > 1:
data = open(sys.argv[1]).read().splitlines()
else:
data = sys.stdin.read().splitlines()
# Split up the double braces.
lines = split_double_braces(data)
# Indent and print the output.
prettyprint_input(lines)
return 0
if __name__ == '__main__':
sys.exit(main())
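# Illustrative CLI usage (added; the invocation below is assumed, not stated in
# the original file):
#   python pretty_gyp.py project.gyp > project.pretty.gyp
# or, reading from stdin:
#   python pretty_gyp.py < project.gyp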
|
mit
|
starcraftman/pakit
|
pakit/main.py
|
2
|
14538
|
"""
The main entry point for pakit.
Acts as an intermediary between program arguments and pakit Tasks.
"""
from __future__ import absolute_import, print_function
import argparse
from argparse import RawDescriptionHelpFormatter as RawDescriptionHelp
import logging
import logging.handlers
import os
import sys
import pakit.conf
import pakit.recipe
import pakit.shell
from pakit import __version__
from pakit.conf import Config, InstallDB
from pakit.exc import PakitError, PakitDBError
from pakit.graph import DiGraph, topological_sort
from pakit.task import (
InstallTask, RemoveTask, UpdateTask, ListInstalled, ListAvailable,
DisplayTask, RelinkRecipes, SearchTask, CreateConfig, PurgeTask
)
PLOG = logging.getLogger('pakit').info
def create_args_parser():
"""
Create the program argument parser.
Returns:
An argparse parser object.
"""
prog_name = os.path.basename(os.path.dirname(sys.argv[0]))
mesg = """
{0} is a build tool providing a package manager like interface
to build & install recipes into local paths.
First Time Run:
Run in shell: `{1} --create-conf`
This writes the default config to $HOME/.pakit.yml
Run in shell: `export PATH=/tmp/pakit/links/bin:$PATH`
This is where all program bins will link to by default.
For project development or to report bugs:
https://github.com/starcraftman/pakit
Additional Information:
man pakit
man pakit_recipes
pydoc pakit
Recipes:
User made recipes can be put in $HOME/.pakit/recipes by default.
If two recipes have same name, last one in will be executed.
Example recipes can be found in the `pakit_recipes` module.
A good example is `pakit_recipes/ag.py`.
""".format(prog_name.capitalize(), prog_name)
mesg = mesg[0:-5]
parser = argparse.ArgumentParser(prog=prog_name, description=mesg,
formatter_class=RawDescriptionHelp)
parser.add_argument('-v', '--version', action='version',
version='pakit {0}\nALPHA!'.format(__version__))
parser.add_argument('-c', '--conf', help='the yaml config file to use')
subs = parser.add_subparsers(title='subcommands',
description='For additional help: '
'pakit <subcommand> -h')
sub = subs.add_parser('install',
description='Install specified RECIPE(s).')
sub.add_argument('recipes', nargs='+', metavar='RECIPE',
help='one or more RECIPE(s) to install')
sub.set_defaults(func=parse_install)
sub = subs.add_parser('remove', description='Remove specified RECIPE(s).')
sub.add_argument('recipes', nargs='+', metavar='RECIPE',
help='one or more RECIPE(s) to remove')
sub.set_defaults(func=parse_remove)
sub = subs.add_parser('update',
description='Update all recipes installed. '
'Alternatively, just specified RECIPE(s)')
sub.add_argument('recipes', nargs='*', default=(), metavar='RECIPE',
help='zero or more RECIPE(s) to update')
sub.set_defaults(func=parse_update)
sub = subs.add_parser('list',
description='List currently installed recipe(s).')
sub.add_argument('--short', default=False, action='store_true',
help='terse output format')
sub.set_defaults(func=parse_list)
sub = subs.add_parser('available',
description='List recipes available to install.')
sub.add_argument('--short', default=False, action='store_true',
help='terse output format')
sub.set_defaults(func=parse_available)
sub = subs.add_parser('display',
description='Information about selected RECIPE(s).')
sub.add_argument('recipes', nargs='+', metavar='RECIPE',
help='display information for one or more RECIPE(s)')
sub.set_defaults(func=parse_display)
desc = """Search for WORD(s) in the recipe database.
The matches for each WORD will be ORed together.
Default options:
- Substring match
- Case insensitive
- Matches against recipe name or description"""
sub = subs.add_parser('search', description=desc,
formatter_class=RawDescriptionHelp)
sub.add_argument('words', nargs='+', metavar='WORD',
help='search names & descriptions for WORD')
sub.add_argument('--case', default=False, action='store_true',
help='enable case sensitivity, default off')
sub.add_argument('--names', default=False, action='store_true',
help='only search against recipe name,'
' default name and description')
sub.set_defaults(func=parse_search)
sub = subs.add_parser('relink',
description='Relink all installed RECIPE(s).'
'\nAlternatively, relink specified RECIPE(s).',
formatter_class=RawDescriptionHelp)
sub.add_argument('recipes', nargs='*', metavar='RECIPE',
help='the RECIPE(s) to manage')
sub.set_defaults(func=parse_relink)
sub = subs.add_parser('create-conf',
description='(Over)write the selected pakit config.')
sub.set_defaults(func=parse_create_conf)
desc = """Remove most traces of pakit. No undo!
Will delete ...
- all links from the link directory to pakit's programs.
- all programs pakit built, including the source trees.
- all downloaded recipes.
- all logs and configs EXCEPT the pakit.yml file."""
sub = subs.add_parser('purge', description=desc,
formatter_class=RawDescriptionHelp)
sub.set_defaults(func=parse_purge)
return parser
def environment_check(config):
"""
Check the environment is correct.
Ensure network connection works.
Guarantee built programs always first in $PATH for Commands.
"""
bin_dir = os.path.join(config.path_to('link'), 'bin')
if os.environ['PATH'].find(bin_dir) == -1:
PLOG('To use built recipes %s must be on shell $PATH.', bin_dir)
PLOG(' For Most Shells: export PATH=%s:$PATH', bin_dir)
os.environ['PATH'] = bin_dir + ':' + os.environ['PATH']
def global_init(config_file):
"""
Performs global configuration of pakit.
Must be called before using pakit. Will ...
- Read user configuration.
- Initialize the logging system.
- Populate the recipe database.
- Create configured folders.
- Setup pakit man page.
Args:
config_file: The YAML configuration filename.
Returns:
The loaded config object.
"""
config = Config(config_file)
pakit.conf.CONFIG = config
log_init(config)
logging.debug('Global Config: %s', config)
PLOG('Loaded config from: ' + config.filename)
for path in config.get('pakit.paths').values():
try:
os.makedirs(path)
except OSError:
pass
prefix = config.path_to('prefix')
pakit.conf.IDB = InstallDB(os.path.join(prefix, 'idb.yml'))
logging.debug('InstallDB: %s', pakit.conf.IDB)
manager = pakit.recipe.RecipeManager(config)
manager.check_for_deletions()
manager.check_for_updates()
manager.init_new_uris()
recipe_db = pakit.recipe.RecipeDB(config)
for path in manager.paths:
recipe_db.index(path)
pakit.recipe.RDB = recipe_db
pakit.shell.link_man_pages(config.path_to('link'))
environment_check(config)
return config
def log_init(config):
"""
Setup project wide logging.
Specifically this will setup both file and console logging.
"""
log_file = config.get('pakit.log.file')
try:
os.makedirs(os.path.dirname(log_file))
except OSError:
pass
root = logging.getLogger()
root.setLevel(logging.DEBUG)
log_fmt = '%(levelname)s %(asctime)s %(threadName)s ' \
'%(filename)s %(message)s'
my_fmt = logging.Formatter(fmt=log_fmt, datefmt='[%d/%m %H%M.%S]')
while len(root.handlers):
root.removeHandler(root.handlers[0])
rot = logging.handlers.RotatingFileHandler(log_file,
mode='a',
maxBytes=2 ** 22,
backupCount=4)
rot.setLevel(logging.DEBUG)
rot.setFormatter(my_fmt)
root.addHandler(rot)
stream = logging.StreamHandler()
stream.setLevel(logging.ERROR)
stream.setFormatter(my_fmt)
root.addHandler(stream)
# Logger for informing user
pak = logging.getLogger('pakit')
while len(pak.handlers):
pak.removeHandler(pak.handlers[0])
pak.setLevel(logging.INFO)
pak_fmt = 'pakit %(asctime)s %(message)s'
pak_info = logging.Formatter(fmt=pak_fmt, datefmt='[%H:%M:%S]')
pak_stream = logging.StreamHandler()
pak_stream.setFormatter(pak_info)
pak.addHandler(pak_stream)
def add_deps_for(recipe_name, graph):
"""
    Recursively add a recipe_name and all requirements to the graph.
NOTE: Cycles may be present in the graph when recursion terminates.
Even so, this function will NOT recurse endlessly.
Args:
graph: A directed graph.
recipe_name: A recipe in RecipeDB.
Raises:
PakitDBError: No matching recipe in RecipeDB.
CycleInGraphError: A cycle was detected in the recursion.
"""
if recipe_name in graph:
return
recipe = pakit.recipe.RDB.get(recipe_name)
graph.add_vertex(recipe_name)
requires = getattr(recipe, 'requires', [])
graph.add_edges(recipe_name, requires)
for requirement in requires:
add_deps_for(requirement, graph)
def order_tasks(recipe_names, task_class):
"""
Order the recipes so that all dependencies can be met.
Args:
recipe_names: List of recipe names.
task_class: The Task to be carried out on the recipes.
Returns:
A list of task_class instances ordered to meet dependencies.
Raises:
CycleInGraphError: The dependencies could not be resolved
as there was a cycle in the dependency graph.
"""
graph = DiGraph()
for recipe_name in recipe_names:
add_deps_for(recipe_name, graph)
return [task_class(recipe_name) for recipe_name in topological_sort(graph)]
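# Illustrative sketch (added; the recipe names are hypothetical): if recipe 'ag'
# declared requires = ['pcre'], then order_tasks(['ag'], InstallTask) returns a
# task list in which 'pcre' is handled before 'ag', since the tasks follow the
# topological order of the dependency graph.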
def parse_install(args):
"""
Parse args for InstallTask(s).
"""
return order_tasks(args.recipes, InstallTask)
def parse_remove(args):
"""
Parse args for RemoveTask(s).
"""
return [RemoveTask(recipe) for recipe in args.recipes]
def parse_update(args):
"""
Parse args for UpdateTask(s).
"""
tasks = None
if len(args.recipes) == 0:
to_update = [recipe for recipe in pakit.conf.IDB]
tasks = order_tasks(to_update, UpdateTask)
else:
to_update = [recipe for recipe in args.recipes
if recipe in pakit.conf.IDB]
not_installed = sorted(set(args.recipes).difference(to_update))
if len(not_installed):
PLOG('Recipe(s) not installed: ' + ', '.join(not_installed))
tasks = order_tasks(to_update, UpdateTask)
if len(tasks) == 0:
PLOG('Nothing to update.')
return tasks
def parse_display(args):
"""
Parse args for DisplayTasks.
"""
return [DisplayTask(prog) for prog in args.recipes]
def parse_available(args):
"""
Parse args for ListAvailable task.
"""
return [ListAvailable(args.short)]
def parse_list(args):
"""
Parse args for ListInstalled task.
"""
return [ListInstalled(args.short)]
def parse_relink(_):
"""
Parse args for RelinkRecipes task.
"""
return [RelinkRecipes()]
def parse_search(args):
"""
Parse args for DisplayTask(s).
"""
return [SearchTask(args)]
def parse_create_conf(args):
"""
Parse args for CreateConfig
"""
return [CreateConfig(args.conf)]
def parse_purge(_):
"""
Parse args for PurgeTask
"""
return [PurgeTask()]
def search_for_config(default_config=None):
"""
Search for the most relevant configuration file.
Will search from current dir upwards until root for config files matching:
- .pakit.yml
- .pakit.yaml
- pakit.yml
- pakit.yaml
If nothing found, search in $HOME and then $HOME/.pakit for same files.
If still nothing found, return default_config.
Args:
default_config: The default config if nothing else present.
Returns:
If a path existed, return found filename.
If no paths matched an existing file return the default_config.
"""
folders = [os.getcwd()]
cur_dir = os.path.dirname(os.getcwd())
while cur_dir != '/':
folders.append(cur_dir)
cur_dir = os.path.dirname(cur_dir)
folders.append(os.path.expanduser('~'))
folders.append(os.path.expanduser('~/.pakit'))
for folder in folders:
for conf in ['.pakit.yml', '.pakit.yaml', 'pakit.yml', 'pakit.yaml']:
config_file = os.path.join(folder, conf)
if os.path.exists(config_file):
return config_file
return default_config
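# Illustrative note (added; hypothetical paths): when run from
# /home/user/projects/app, the candidates are checked in this order:
#   /home/user/projects/app/{.pakit.yml,.pakit.yaml,pakit.yml,pakit.yaml},
#   then each parent directory walking toward / (the root itself is skipped),
#   then $HOME and $HOME/.pakit,
# returning the first existing file, or default_config when none exist.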
def main(argv=None):
"""
The main entry point for this program.
Args:
argv: A list of program options, if None use sys.argv.
"""
if argv is None:
argv = sys.argv
parser = create_args_parser()
if len(argv) == 1:
logging.error('No arguments. What should I do?')
parser.print_usage()
sys.exit(1)
try:
args = parser.parse_args(argv[1:])
if not args.conf:
args.conf = search_for_config(os.path.expanduser('~/.pakit.yml'))
global_init(args.conf)
logging.debug('CLI: %s', args)
for task in args.func(args):
PLOG('Running: %s', str(task))
task.run()
except PakitDBError as exc:
PLOG(str(exc))
except PakitError as exc:
if not pakit.shell.check_connectivity():
logging.error('You do not seem to be connected to the internet.')
logging.error("Pakit can't do much without it!")
else:
logging.error(exc)
raise
if __name__ == '__main__':
main() # pragma: no cover
|
bsd-3-clause
|
x111ong/odoo
|
addons/mail/mail_group.py
|
247
|
12877
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import openerp
import openerp.tools as tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
class mail_group(osv.Model):
""" A mail_group is a collection of users sharing messages in a discussion
group. The group mechanics are based on the followers. """
_description = 'Discussion group'
_name = 'mail.group'
_mail_flat_thread = False
_inherit = ['mail.thread']
_inherits = {'mail.alias': 'alias_id'}
def _get_image(self, cr, uid, ids, name, args, context=None):
result = {}
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'description': fields.text('Description'),
'menu_id': fields.many2one('ir.ui.menu', string='Related Menu', required=True, ondelete="cascade"),
'public': fields.selection([('public', 'Public'), ('private', 'Private'), ('groups', 'Selected Group Only')], 'Privacy', required=True,
help='This group is visible by non members. \
Invisible groups can add members through the invite button.'),
'group_public_id': fields.many2one('res.groups', string='Authorized Group'),
'group_ids': fields.many2many('res.groups', rel='mail_group_res_group_rel',
id1='mail_group_id', id2='groups_id', string='Auto Subscription',
            help="Members of those groups will automatically be added as followers. "\
"Note that they will be able to manage their subscription manually "\
"if necessary."),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Photo",
help="This field holds the image used as photo for the group, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized photo of the group. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the group. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this group. New emails received will automatically "
"create new topics."),
}
def _get_default_employee_group(self, cr, uid, context=None):
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
return ref and ref[1] or False
def _get_default_image(self, cr, uid, context=None):
image_path = openerp.modules.get_module_resource('mail', 'static/src/img', 'groupdefault.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
_defaults = {
'public': 'groups',
'group_public_id': _get_default_employee_group,
'image': _get_default_image,
}
def _generate_header_description(self, cr, uid, group, context=None):
header = ''
if group.description:
header = '%s' % group.description
if group.alias_id and group.alias_name and group.alias_domain:
if header:
header = '%s<br/>' % header
return '%sGroup email gateway: %s@%s' % (header, group.alias_name, group.alias_domain)
return header
def _subscribe_users(self, cr, uid, ids, context=None):
for mail_group in self.browse(cr, uid, ids, context=context):
partner_ids = []
for group in mail_group.group_ids:
partner_ids += [user.partner_id.id for user in group.users]
self.message_subscribe(cr, uid, ids, partner_ids, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# get parent menu
menu_parent = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'mail_group_root')
menu_parent = menu_parent and menu_parent[1] or False
# Create menu id
mobj = self.pool.get('ir.ui.menu')
menu_id = mobj.create(cr, SUPERUSER_ID, {'name': vals['name'], 'parent_id': menu_parent}, context=context)
vals['menu_id'] = menu_id
# Create group and alias
create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name, mail_create_nolog=True)
mail_group_id = super(mail_group, self).create(cr, uid, vals, context=create_context)
group = self.browse(cr, uid, mail_group_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [group.alias_id.id], {"alias_force_thread_id": mail_group_id, 'alias_parent_thread_id': mail_group_id}, context)
group = self.browse(cr, uid, mail_group_id, context=context)
# Create client action for this group and link the menu to it
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'action_mail_group_feeds')
if ref:
search_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'view_message_search')
params = {
'search_view_id': search_ref and search_ref[1] or False,
'domain': [
('model', '=', 'mail.group'),
('res_id', '=', mail_group_id),
],
'context': {
'default_model': 'mail.group',
'default_res_id': mail_group_id,
},
'res_model': 'mail.message',
'thread_level': 1,
'header_description': self._generate_header_description(cr, uid, group, context=context),
'view_mailbox': True,
'compose_placeholder': 'Send a message to the group',
}
cobj = self.pool.get('ir.actions.client')
newref = cobj.copy(cr, SUPERUSER_ID, ref[1], default={'params': str(params), 'name': vals['name']}, context=context)
mobj.write(cr, SUPERUSER_ID, menu_id, {'action': 'ir.actions.client,' + str(newref), 'mail_group_id': mail_group_id}, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, [mail_group_id], context=context)
return mail_group_id
def unlink(self, cr, uid, ids, context=None):
groups = self.browse(cr, uid, ids, context=context)
alias_ids = [group.alias_id.id for group in groups if group.alias_id]
menu_ids = [group.menu_id.id for group in groups if group.menu_id]
# Delete mail_group
try:
all_emp_group = self.pool['ir.model.data'].get_object_reference(cr, uid, 'mail', 'group_all_employees')[1]
except ValueError:
all_emp_group = None
if all_emp_group and all_emp_group in ids:
raise osv.except_osv(_('Warning!'), _('You cannot delete those groups, as the Whole Company group is required by other modules.'))
res = super(mail_group, self).unlink(cr, uid, ids, context=context)
# Cascade-delete mail aliases as well, as they should not exist without the mail group.
self.pool.get('mail.alias').unlink(cr, SUPERUSER_ID, alias_ids, context=context)
# Cascade-delete menu entries as well
self.pool.get('ir.ui.menu').unlink(cr, SUPERUSER_ID, menu_ids, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
result = super(mail_group, self).write(cr, uid, ids, vals, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, ids, context=context)
# if description, name or alias is changed: update client action
if vals.get('description') or vals.get('name') or vals.get('alias_id') or vals.get('alias_name'):
cobj = self.pool.get('ir.actions.client')
# update the client action of each group with its own header description
for group in self.browse(cr, uid, ids, context=context):
    action = group.menu_id.action
    new_params = action.params
    new_params['header_description'] = self._generate_header_description(cr, uid, group, context=context)
    cobj.write(cr, SUPERUSER_ID, [action.id], {'params': str(new_params)}, context=context)
# if name is changed: update menu
if vals.get('name'):
mobj = self.pool.get('ir.ui.menu')
mobj.write(cr, SUPERUSER_ID,
[group.menu_id.id for group in self.browse(cr, uid, ids, context=context)],
{'name': vals.get('name')}, context=context)
return result
def action_follow(self, cr, uid, ids, context=None):
""" Wrapper because message_subscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_subscribe_users(cr, uid, ids, context=context)
def action_unfollow(self, cr, uid, ids, context=None):
""" Wrapper because message_unsubscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_unsubscribe_users(cr, uid, ids, context=context)
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Show the suggestion of groups if display_groups_suggestions if the
user perference allows it."""
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if not user.display_groups_suggestions:
return []
else:
return super(mail_group, self).get_suggested_thread(cr, uid, removed_suggested_threads, context)
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
res = super(mail_group, self).message_get_email_values(cr, uid, id, notif_mail=notif_mail, context=context)
group = self.browse(cr, uid, id, context=context)
headers = {}
if res.get('headers'):
try:
headers.update(eval(res['headers']))
except Exception:
pass
headers['Precedence'] = 'list'
# avoid out-of-office replies from MS Exchange
# http://blogs.technet.com/b/exchange/archive/2006/10/06/3395024.aspx
headers['X-Auto-Response-Suppress'] = 'OOF'
if group.alias_domain and group.alias_name:
headers['List-Id'] = '%s.%s' % (group.alias_name, group.alias_domain)
headers['List-Post'] = '<mailto:%s@%s>' % (group.alias_name, group.alias_domain)
# Avoid users thinking it was a personal message
# X-Forge-To: will replace To: after SMTP envelope is determined by ir.mail.server
list_to = '"%s" <%s@%s>' % (group.name, group.alias_name, group.alias_domain)
headers['X-Forge-To'] = list_to
res['headers'] = repr(headers)
return res
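# Illustrative result (hypothetical alias 'sales' on domain 'example.com' for a
# group named 'Sales'): res['headers'] ends up holding the repr() of a dict
# roughly like
#   {'Precedence': 'list', 'X-Auto-Response-Suppress': 'OOF',
#    'List-Id': 'sales.example.com', 'List-Post': '<mailto:sales@example.com>',
#    'X-Forge-To': '"Sales" <sales@example.com>'}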
|
agpl-3.0
|
annarev/tensorflow
|
tensorflow/python/ops/parallel_for/array_test.py
|
9
|
16317
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of array kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
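# Illustrative sketch (not part of the original test suite) of the pattern the
# tests below exercise: pfor vectorizes loop_fn over the loop index, so
# gathering row i and applying array ops inside loop_fn produces the same
# values as stacking the per-iteration results, e.g.
#
#   x = random_ops.random_uniform([3, 2])
#   def loop_fn(i):
#     return array_ops.gather(x, i) * 2.
#   vectorized = pfor_control_flow_ops.pfor(loop_fn, 3)  # equivalent to 2. * x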
@test_util.run_all_in_graph_and_eager_modes
class ArrayTest(PForTestCase):
def test_gather(self):
x = random_ops.random_uniform([3, 3, 3, 3])
x2 = array_ops.placeholder_with_default(x, shape=None) # Has dynamic shape.
def loop_fn(i):
outputs = []
x_i = array_ops.gather(x, i)
for y in [x, x2, x_i]:
for axis in [0, 2, -1]:
outputs.append(array_ops.gather(y, 2, axis=axis))
outputs.append(
array_ops.gather(y, math_ops.cast(2, dtypes.int64), axis=axis))
outputs.append(
array_ops.gather(y, 2, axis=math_ops.cast(axis, dtypes.int64)))
outputs.append(
array_ops.gather(y, math_ops.cast(i, dtypes.int64), axis=axis))
outputs.append(array_ops.gather(y, [i], axis=axis))
outputs.append(array_ops.gather(y, [i, 2], axis=axis))
outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
outputs.append(array_ops.gather(y, [0, 1, 2], axis=1, batch_dims=1))
outputs.append(array_ops.gather(y, [i, 1, 2], axis=2, batch_dims=1))
outputs.append(array_ops.gather(y, [[2, i], [i, 1], [2, 1]],
axis=-1, batch_dims=1))
outputs.append(
array_ops.gather(y, [[0, 1, 2]] * 3, axis=2, batch_dims=2))
outputs.append(array_ops.gather(y, [0, 1, 2], axis=1, batch_dims=-1))
outputs.append(
array_ops.gather(y, [[0, 1, 2]] * 3, axis=2, batch_dims=-2))
return outputs
self._test_loop_fn(loop_fn, 3)
def test_gather_nd(self):
x = random_ops.random_uniform([3, 3, 3])
def loop_fn(i):
outputs = []
x_i = array_ops.gather(x, i)
outputs.append(array_ops.gather_nd(x_i, [0], batch_dims=0))
outputs.append(array_ops.gather_nd(x_i, [i], batch_dims=0))
outputs.append(array_ops.gather_nd(x_i, [[i], [i], [i]], batch_dims=1))
return outputs
self._test_loop_fn(loop_fn, 3)
def test_shape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3)
def test_size(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3)
def test_rank(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.rank(x_i)
self._test_loop_fn(loop_fn, 3)
def test_shape_n(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
return array_ops.shape_n([x_i, x, y,
y_i]), array_ops.shape_n([x_i, x, y, y_i],
out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3)
def test_reshape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])
self._test_loop_fn(loop_fn, 3)
def test_fill(self):
def loop_fn(i):
return array_ops.fill((2, 3), i)
self._test_loop_fn(loop_fn, 3)
def test_broadcast_to(self):
x = random_ops.random_uniform([3, 2, 1, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.broadcast_to(x1, [2, 2, 3]),
array_ops.broadcast_to(x1, [1, 2, 1, 3]))
self._test_loop_fn(loop_fn, 3)
def test_expand_dims(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return [
array_ops.expand_dims(x1, axis=-1),
array_ops.expand_dims(x1, axis=1),
array_ops.expand_dims(
x1, axis=constant_op.constant(1, dtype=dtypes.int64))
]
self._test_loop_fn(loop_fn, 3)
def test_one_hot(self):
indices = random_ops.random_uniform([3, 2, 3],
minval=0,
maxval=4,
dtype=dtypes.int32)
def loop_fn(i):
indices_i = array_ops.gather(indices, i)
return (array_ops.one_hot(indices_i, depth=4, on_value=2., off_value=-2.),
array_ops.one_hot(indices_i, depth=4, axis=1))
self._test_loop_fn(loop_fn, 3)
def test_searchsorted(self):
sorted_inputs = math_ops.cumsum(
random_ops.random_uniform([3, 2, 4]), axis=-1)
values = random_ops.random_uniform([2, 3], minval=-1, maxval=4.5)
def loop_fn(i):
inputs_i = array_ops.gather(sorted_inputs, i)
return [
array_ops.searchsorted(
inputs_i, values, out_type=dtypes.int32,
side="left"), # creates LowerBound op.
array_ops.searchsorted(
inputs_i, values, out_type=dtypes.int64,
side="right"), # creates UpperBound op.
]
self._test_loop_fn(loop_fn, 3)
def test_slice(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.slice(x1, begin=(0, 1), size=(2, 1))
self._test_loop_fn(loop_fn, 3)
def test_tile(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [2, 1])
self._test_loop_fn(loop_fn, 3)
def test_tile_loop_dependent(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [i, 1])
with self.assertRaisesRegex(ValueError, "expected to be loop invariant"):
pfor_control_flow_ops.pfor(loop_fn, 2, fallback_to_while_loop=False)
def test_pack(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.stack([x1, y], axis=-1)
self._test_loop_fn(loop_fn, 1)
def test_unpack(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.unstack(
x_i, 4, axis=-1), array_ops.unstack(
x_i, 3, axis=1)
self._test_loop_fn(loop_fn, 3)
def test_pad(self):
x = random_ops.random_uniform([3, 2, 3])
padding = constant_op.constant([[1, 2], [3, 4]])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.pad(x1, padding, mode="CONSTANT")
self._test_loop_fn(loop_fn, 3)
def test_split(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)
self._test_loop_fn(loop_fn, 3)
def test_split_v(self):
x = random_ops.random_uniform([3, 6, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.split(x1, [2, 1, 3],
axis=0), array_ops.split(x1, [3], axis=-1))
self._test_loop_fn(loop_fn, 3)
def test_squeeze(self):
x = random_ops.random_uniform([5, 1, 2, 1])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.squeeze(x1, axis=0), array_ops.squeeze(x1, axis=-1),
array_ops.squeeze(x1))
self._test_loop_fn(loop_fn, 3)
def test_reverse(self):
x = random_ops.random_uniform([3, 4, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.reverse(x1, axis=[0]),
array_ops.reverse(x1, axis=[-1]),
array_ops.reverse(x1, axis=[1, -1]))
self._test_loop_fn(loop_fn, 3)
def test_transpose(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.transpose(x1, [2, 1, 0])
self._test_loop_fn(loop_fn, 3)
def test_conjugate_transpose(self):
x = math_ops.complex(
random_ops.random_uniform([3, 2, 3, 4]),
random_ops.random_uniform([3, 2, 3, 4]))
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.conjugate_transpose(x_i, [2, 1, 0])
self._test_loop_fn(loop_fn, 3)
def test_zeros_like(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
z = array_ops.zeros_like(x1),
return z, z + x1
self._test_loop_fn(loop_fn, 3)
def test_concat_v2(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return [
array_ops.concat([x1, x1, y], axis=0),
array_ops.concat([x1, x1, y], axis=-1),
array_ops.concat([x1, x1, y],
axis=constant_op.constant(0, dtype=dtypes.int64))
]
self._test_loop_fn(loop_fn, 3)
def test_unary_cwise_ops(self):
for op in [array_ops.identity, array_ops.stop_gradient]:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
y = op(x1) + x1
loss = nn.l2_loss(y)
return op(x), y, g.gradient(loss, x1)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_identity_n(self):
x = random_ops.random_uniform([3, 4])
def loop_fn(i):
return array_ops.identity_n([x, array_ops.gather(x, i)])
self._test_loop_fn(loop_fn, 3)
def test_matrix_band_part(self):
x = random_ops.random_uniform([3, 4, 2, 2])
for num_lower, num_upper in ((0, -1), (-1, 0), (1, 1)):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return array_ops.matrix_band_part(
array_ops.gather(x, i), num_lower=num_lower, num_upper=num_upper)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_matrix_diag(self):
x = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
diagonal = array_ops.gather(x, i)
return array_ops.matrix_diag(
diagonal, k=(0, 1), num_rows=4, num_cols=5, align="RIGHT_LEFT")
self._test_loop_fn(loop_fn, 3)
def test_matrix_diag_part(self):
x = random_ops.random_uniform([3, 4, 6])
def loop_fn(i):
input = array_ops.gather(x, i) # pylint: disable=redefined-builtin
return array_ops.matrix_diag_part(
input, k=(-2, 0), padding_value=3, align="RIGHT_LEFT")
self._test_loop_fn(loop_fn, 3)
def test_diag(self):
for x in (random_ops.random_uniform([3, 4]),
random_ops.random_uniform([3, 4, 2])):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
inp = array_ops.gather(x, i)
return array_ops.diag(inp)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_diag_part(self):
for x in (random_ops.random_uniform([3, 2, 2]),
random_ops.random_uniform([3, 4, 2, 4, 2])):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
inp = array_ops.gather(x, i) # pylint: disable=redefined-builtin
return array_ops.diag_part(inp)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_matrix_set_diag(self):
matrices = random_ops.random_uniform([3, 4, 4])
diags = random_ops.random_uniform([3, 4])
bands = random_ops.random_uniform([3, 3, 4])
def loop_fn(i):
matrix_i = array_ops.gather(matrices, i)
diag_i = array_ops.gather(diags, i)
results = [
array_ops.matrix_set_diag(matrix_i, diag_i),
array_ops.matrix_set_diag(matrices[0, ...], diag_i),
array_ops.matrix_set_diag(matrix_i, diags[0, ...]),
]
k = (-1, 1)
band_i = array_ops.gather(bands, i)
for align in ["RIGHT_LEFT", "LEFT_RIGHT"]:
results.extend([
array_ops.matrix_set_diag(matrix_i, band_i, k=k, align=align),
array_ops.matrix_set_diag(
matrices[0, ...], band_i, k=k, align=align),
array_ops.matrix_set_diag(
matrix_i, bands[0, ...], k=k, align=align)
])
return results
self._test_loop_fn(loop_fn, 3)
def test_strided_slice(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
g.watch(x)
def loop_fn(i):
with g:
x_i = array_ops.gather(x, i)
y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
loss = nn.l2_loss(y)
return y, g.gradient(loss, x_i)
self._test_loop_fn(loop_fn, 3)
def test_strided_slice_loop_variant(self):
x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return x_i[i:i+1, ...]
# Test that the fallback to a while loop handles the
# ConversionNotImplementedError.
self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=True)
# Without fallback, ValueError is thrown.
with self.assertRaisesRegex(ValueError, "expected to be loop invariant"):
self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=False)
def test_depth_to_space(self):
x = random_ops.random_uniform([2, 3, 2, 2, 12])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.depth_to_space(x1, 2, data_format="NHWC")
self._test_loop_fn(loop_fn, 2)
def test_space_to_depth(self):
x = random_ops.random_uniform([2, 3, 12, 12, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.space_to_depth(x1, 2, data_format="NHWC")
self._test_loop_fn(loop_fn, 2)
def test_batch_to_space_nd(self):
x = random_ops.random_uniform([7, 5 * 2 * 3, 2, 2, 3, 2])
block_shapes = [2, 3]
crops = [[1, 2], [1, 0]]
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.batch_to_space_nd(x1, block_shapes, crops)
self._test_loop_fn(loop_fn, 7)
def test_space_to_batch_nd(self):
x = random_ops.random_uniform([7, 5, 2 * 2 - 3, 2 * 3 - 1, 3, 2])
block_shapes = [2, 3]
paddings = [[1, 2], [1, 0]]
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.space_to_batch_nd(x1, block_shapes, paddings)
self._test_loop_fn(loop_fn, 7)
def test_check_numerics(self):
x = random_ops.random_uniform([2, 3, 4])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.check_numerics(x_i, "test_message")
self._test_loop_fn(loop_fn, 2)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
vmindru/ansible
|
lib/ansible/modules/network/aci/mso_schema_template_anp_epg_subnet.py
|
4
|
7529
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_anp_epg_subnet
short_description: Manage EPG subnets in schema templates
description:
- Manage EPG subnets in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template to change.
type: str
required: yes
anp:
description:
- The name of the ANP.
type: str
required: yes
epg:
description:
- The name of the EPG to manage.
type: str
required: yes
ip:
description:
- The IP range in CIDR notation.
type: str
required: yes
description:
description:
- The description of this subnet.
type: str
scope:
description:
- The scope of the subnet.
type: str
choices: [ private, public ]
shared:
description:
- Whether this subnet is shared between VRFs.
type: bool
no_default_gateway:
description:
- Whether this subnet has no default gateway.
type: bool
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
notes:
- Due to restrictions of the MSO REST API, concurrent modifications to EPG subnets can be dangerous and corrupt data.
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new subnet to an EPG
mso_schema_template_anp_epg_subnet:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
ip: 10.0.0.0/24
state: present
delegate_to: localhost
- name: Remove a subnet from an EPG
mso_schema_template_anp_epg_subnet:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
ip: 10.0.0.0/24
state: absent
delegate_to: localhost
- name: Query a specific EPG subnet
mso_schema_template_anp_epg_subnet:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
ip: 10.0.0.0/24
state: query
delegate_to: localhost
register: query_result
- name: Query all EPG subnets
mso_schema_template_anp_epg_subnet:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, mso_subnet_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
anp=dict(type='str', required=True),
epg=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
argument_spec.update(mso_subnet_spec())
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['ip']],
['state', 'present', ['ip']],
],
)
schema = module.params['schema']
template = module.params['template']
anp = module.params['anp']
epg = module.params['epg']
ip = module.params['ip']
description = module.params['description']
scope = module.params['scope']
shared = module.params['shared']
no_default_gateway = module.params['no_default_gateway']
state = module.params['state']
mso = MSOModule(module)
# Get schema
schema_obj = mso.get_obj('schemas', displayName=schema)
if not schema_obj:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
# Get template
templates = [t['name'] for t in schema_obj['templates']]
if template not in templates:
mso.fail_json(msg="Provided template '{template}' does not exist. Existing templates: {templates}".format(template=template,
templates=', '.join(templates)))
template_idx = templates.index(template)
# Get ANP
anps = [a['name'] for a in schema_obj['templates'][template_idx]['anps']]
if anp not in anps:
mso.fail_json(msg="Provided anp '{anp}' does not exist. Existing anps: {anps}".format(anp=anp, anps=', '.join(anps)))
anp_idx = anps.index(anp)
# Get EPG
epgs = [e['name'] for e in schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs']]
if epg not in epgs:
mso.fail_json(msg="Provided epg '{epg}' does not exist. Existing epgs: {epgs}".format(epg=epg, epgs=', '.join(epgs)))
epg_idx = epgs.index(epg)
# Get Subnet
subnets = [s['ip'] for s in schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs'][epg_idx]['subnets']]
if ip in subnets:
ip_idx = subnets.index(ip)
# FIXME: Changes based on index are DANGEROUS
subnet_path = '/templates/{0}/anps/{1}/epgs/{2}/subnets/{3}'.format(template, anp, epg, ip_idx)
mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs'][epg_idx]['subnets'][ip_idx]
if state == 'query':
if ip is None:
mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs'][epg_idx]['subnets']
elif not mso.existing:
mso.fail_json(msg="Subnet '{ip}' not found".format(ip=ip))
mso.exit_json()
subnets_path = '/templates/{0}/anps/{1}/epgs/{2}/subnets'.format(template, anp, epg)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.existing = {}
ops.append(dict(op='remove', path=subnet_path))
elif state == 'present':
if description is None and not mso.existing:
description = ip
if scope is None and not mso.existing:
scope = 'private'
if shared is None and not mso.existing:
shared = False
if no_default_gateway is None and not mso.existing:
no_default_gateway = False
payload = dict(
ip=ip,
description=description,
scope=scope,
shared=shared,
noDefaultGateway=no_default_gateway,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=subnet_path, value=mso.sent))
else:
ops.append(dict(op='add', path=subnets_path + '/-', value=mso.sent))
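# Illustrative sketch (hypothetical values taken from the EXAMPLES block): for
# a new subnet the resulting JSON-patch style operation sent to the MSO API is
# roughly
#   {'op': 'add',
#    'path': '/templates/Template 1/anps/ANP 1/epgs/EPG 1/subnets/-',
#    'value': {'ip': '10.0.0.0/24', 'description': '10.0.0.0/24',
#              'scope': 'private', 'shared': False, 'noDefaultGateway': False}}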
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
|
gpl-3.0
|