repo_name (string, lengths 6-61) | path (string, lengths 4-230) | copies (string, lengths 1-3) | size (string, lengths 4-6) | text (string, lengths 1.01k-850k) | license (string, 15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
flgiordano/netcash | +/google-cloud-sdk/lib/surface/bigquery/jobs/show_rows.py | 1 | 2134 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gcloud bigquery jobs show-rows.
"""
from googlecloudsdk.api_lib.bigquery import bigquery
from googlecloudsdk.calliope import base
from googlecloudsdk.core.console import console_io
class JobsShowRows(base.Command):
"""Displays selected rows in the result of a query job.
"""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'--limit',
type=int,
default=bigquery.DEFAULT_RESULTS_LIMIT,
help='The maximum number of rows to display.')
parser.add_argument(
'--start-row',
type=int,
default=0,
help='The number of rows to skip before showing table data.')
parser.add_argument('job_id', help='The job ID of the asynchronous query.')
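  # Illustrative invocation (hypothetical values): with the flags registered
  # above, a call to this command looks roughly like
  #   gcloud bigquery jobs show-rows JOB_ID --start-row 10 --limit 100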
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
      args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
A bigquery.QueryResults object.
"""
job = bigquery.Job.ResolveFromId(args.job_id)
return job.GetQueryResults(start_row=args.start_row, max_rows=args.limit)
def Display(self, args, query_results):
"""This method is called to print the result of the Run() method.
Args:
args: The arguments that the command was run with.
query_results: A bigquery.QueryResults object.
"""
console_io.PrintExtendedList(query_results,
query_results.GetColumnFetchers())
| bsd-3-clause | -6,707,530,095,116,616,000 | 32.34375 | 79 | 0.695876 | false | 4.095969 | false | false | false |
snowdreamist/wfpy | src/wfpy/internal.py | 1 | 10137 |
# encoding=utf8
# wfpy internal implementation
# [email protected]
from contextlib import contextmanager
from threading import Thread, Lock, Semaphore
@contextmanager
def lock(lock):
try:
lock.acquire(True)
yield
finally:
lock.release()
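# Illustrative usage sketch (hypothetical names): the lock() context manager
# above acquires the given lock on entry and always releases it on exit, even
# if the body raises.
def _example_locked_append(shared_list, item, guard):
    # guard is assumed to be a threading.Lock shared by every caller
    with lock(guard):
        shared_list.append(item)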
class InternalLogger(object):
"""
Internal logger
"""
def __init__(self, name, logger):
self.__name = name
self.__logger = logger
@property
def name(self):
return self.__name
def error(self, message, error = None, paths = None):
"""
Log error
Parameters:
message error message
error exception object
"""
if paths is None:
paths = [ self.name ]
else:
paths.append(self.name)
self.__logger.error(message, error, paths)
def info(self, message, error = None, paths = None):
"""
Log info
"""
if paths is None:
paths = [ self.name ]
else:
paths.append(self.name)
self.__logger.info(message, error, paths)
def warn(self, message, error = None, paths = None):
"""
Log warn
"""
if paths is None:
paths = [ self.name ]
else:
paths.append(self.name)
self.__logger.warn(message, error, paths)
def debug(self, message, error = None, paths = None):
"""
Log debug
"""
if paths is None:
paths = [ self.name ]
else:
paths.append(self.name)
self.__logger.debug(message, error, paths)
class InternalRootLogger(InternalLogger):
"""
Root internal logger
"""
def __init__(self, sysLogger, logCallback = None):
"""
Parameters:
sysLogger python built-in logger
logCallback callback on log event
callback function signature: callbackHandler(level, path, message, error)
"""
self.__sysLogger = sysLogger
self.__logCallback = logCallback
super(InternalRootLogger, self).__init__('root', None)
    def __formatexception__(self, error):
        """
        Format exception
        """
        if error is None:
            return ''
        return '%s: %s' % (type(error).__name__, error)
def __log__(self, level, paths, message, error):
"""
Do log
"""
if paths is None:
path = ''
else:
path = '.'.join(reversed(paths))
# Callback
cbError = None
if not self.__logCallback is None:
try:
self.__logCallback(level, path, message, error)
except Exception as callbackError:
cbError = callbackError
# Log
if isinstance(path, unicode):
path = path.encode('utf8')
if isinstance(message, unicode):
message = message.encode('utf8')
# Get method
if level == 'error':
logfunc = self.__sysLogger.error
elif level == 'warn':
logfunc = self.__sysLogger.warn
elif level == 'debug':
logfunc = self.__sysLogger.debug
else:
logfunc = self.__sysLogger.info
logfunc('[%s]:[%s]', path, message)
if not error is None:
logfunc('[%s]:[Related exception]:[%s]', path, self.__formatexception__(error))
if not cbError is None:
self.__sysLogger.warn('[Logger][%s]:[Callback failed]', path)
self.__sysLogger.warn('[Logger][%s]:[Related exception]:[%s]', path, self.__formatexception__(cbError))
def error(self, message, error = None, paths = None):
"""
Log error
"""
self.__log__('error', paths, message, error)
    def info(self, message, error = None, paths = None):
"""
Log info
"""
self.__log__('info', paths, message, error)
def warn(self, message, error = None, paths = None):
"""
Log warn
"""
self.__log__('warn', paths, message, error)
def debug(self, message, error = None, paths = None):
"""
Log debug
"""
self.__log__('debug', paths, message, error)
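# Illustrative wiring sketch (hypothetical names): InternalRootLogger accepts
# an optional callback with the signature documented above,
# callbackHandler(level, path, message, error).
#
#     import logging
#
#     def on_log(level, path, message, error):
#         pass  # forward to a GUI, a file, etc.
#
#     root = InternalRootLogger(logging.getLogger('wfpy'), on_log)
#     child = InternalLogger('scheduler', root)
#     child.warn('something looks odd')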
class Dispatcher(object):
"""
The worker dispatcher
"""
class Instance(object):
"""
Dispatcher instance
"""
def __init__(self, method, args, kwargs):
self.method = method
self.args = args
self.kwargs = kwargs
def __call__(self):
"""
Invoke
"""
args = self.args if not self.args is None else []
kwargs = self.kwargs if not self.kwargs is None else {}
return self.method(*args, **kwargs)
def __init__(self, name, logger):
self.__thread = Thread(name = name, target = self.__worker__)
self.__lock = Lock()
self.__sema = Semaphore(0)
self.__isStopping = False
self.__isRunning = False
self.__queue = []
self.__logger = InternalLogger('dispatcher', logger)
def run(self):
"""
Start running
"""
if self.__isStopping:
raise RuntimeError('Cannot run when stopping or stopped')
if self.__isRunning:
raise RuntimeError('Cannot run when running')
self.__isRunning = True
self.__thread.start()
self.__logger.info('Started')
def stop(self):
"""
Stop running
"""
self.__isStopping = True
self.__sema.release() # Notify worker thread
def invoke(self, method, args = None, kwargs = None):
"""
Add an method
"""
if self.__isStopping:
raise RuntimeError('Cannot invoke when stopping')
if method is None:
raise ValueError('Argument method cannot be None')
instance = Dispatcher.Instance(method, args, kwargs)
with lock(self.__lock):
self.__queue.append(instance)
self.__sema.release()
def __worker__(self):
"""
The dispatcher worker
"""
while True:
# Wait until new request in
self.__sema.acquire()
# Check if the dispatcher is stopping
# If so ignore all existing invokings and exit
if self.__isStopping:
break
with lock(self.__lock):
# Test empty
if len(self.__queue) == 0:
continue
# Pop an instance
instance = self.__queue.pop(0)
# Do it
try:
instance()
except Exception as error:
# Something went wrong, do error report
                self.__logger.error('Invoke failed', error)
        # Exit
        if len(self.__queue) > 0:
            self.__logger.warn('Abandon %d invoking instance on exiting' % len(self.__queue))
        self.__logger.info('Exit')
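# Illustrative lifecycle sketch (hypothetical names): work submitted through
# invoke() runs one item at a time on the dispatcher's own thread.
def _example_dispatcher_usage(root_logger, task):
    d = Dispatcher('worker-0', root_logger)
    d.run()                        # start the worker thread
    d.invoke(task, args=(1, 2))    # task(1, 2) will run on the worker thread
    d.stop()                       # wake the worker so it can exit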
class Event(object):
"""
Event object
"""
def __init__(self):
self.__events = []
def add(self, handler):
"""
Add an event handler
"""
self.__events.append(handler)
def remove(self, handler):
"""
Remove an event handler
"""
self.__events.remove(handler)
def clear(self):
"""
Clear all events
"""
self.__events = []
def __call__(self, sender, eventArgs):
"""
Raise this event
"""
        for handler in self.__events:
handler(sender, eventArgs)
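# Illustrative usage sketch (hypothetical names): Event fans a single call out
# to every registered handler.
def _example_event_usage(sender):
    ev = Event()
    seen = []
    ev.add(lambda s, e: seen.append((s, e)))
    ev(sender, {'reason': 'demo'})   # every handler receives (sender, eventArgs)
    return seen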
class WorkerInstance(object):
"""
A worker instance
"""
def __init__(self, worker):
self.__worker = worker
@property
def worker(self):
return self.__worker
class Workerpool(object):
"""
The worker pool
"""
def __init__(self):
self.__queues = {}
self.__lock = Lock()
def add(self, instance):
"""
Add an worker instance
"""
self[instance.worker.guid].append(instance)
def __getitem__(self, guid):
"""
Get a queue or create one by guid
"""
with lock(self.__lock):
if not guid in self.__queues:
queue = WorkerQueue()
self.__queues[guid] = queue
else:
queue = self.__queues[guid]
return queue
class WorkerQueue(object):
"""
The worker queue
"""
def __init__(self):
self.__lst = []
self.__lock = Lock()
def append(self, instance):
"""
Add a worker instance
"""
with lock(self.__lock):
self.__lst.append(instance)
def pop(self):
"""
Pop a worker instance
None if the queue is empty
"""
with lock(self.__lock):
if len(self.__lst) == 0:
return None
return self.__lst.pop(0)
class Components(object):
"""
Component container
NOTE:
Why not use PluginRepo as Components directly?
Since there're some components are application wide and some are workflow wide.
The scopes are not consistent
"""
def __init__(self, pluginRepo):
self.pluginRepo = pluginRepo
self.__comsdct = {}
def __contains__(self, name):
"""
Check component
"""
return name in self.__comsdct
def __getitem__(self, name):
"""
Get component
"""
return self.__comsdct[name]
def __setitem__(self, name, value):
"""
Set component
"""
self.__comsdct[name] = value
class CallbackInvoker(object):
"""
Support all callback invoking
"""
def __init__(self, components):
self.__dispatcher = components['dispatcher']
def invoke(self, callbackHandler, *args, **kwargs):
"""
Invoke callbackHandler
NOTE:
All callback should run in dispatcher in order to avoid multi-thread problems
"""
        self.__dispatcher.invoke(callbackHandler, args, kwargs)
| gpl-3.0 | -8,564,515,949,999,352,000 | 25.539267 | 115 | 0.511788 | false | 4.570334 | false | false | false
j4321/MyNotes | mynoteslib/mytext.py | 1 | 28096 |
#! /usr/bin/python3
# -*- coding:Utf-8 -*-
"""
MyNotes - Sticky notes/post-it
Copyright 2016-2019 Juliette Monsel <[email protected]>
MyNotes is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MyNotes is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Text class with improved undo/redo
"""
import os
import re
from tkinter import Text, TclError
from tkinter.font import Font
from tkinter.ttk import Checkbutton
from mynoteslib.constants import CONFIG, TEXT_COLORS, PATH_LATEX, AUTOCORRECT, \
text_ranges, sorting
class Checkbox(Checkbutton):
def __init__(self, master, **kw):
Checkbutton.__init__(self, master, command=self.command, **kw)
def command(self):
self.master.cb_state_change(self, 'selected' in self.state())
class MyText(Text):
def __init__(self, master=None, mode='note', cb_style="TCheckbutton", **kw):
Text.__init__(self, master, wrap='word', undo=False,
autoseparator=False, tabs=(10, 'right', 21, 'left'),
relief="flat", borderwidth=0,
highlightthickness=0, **kw)
self.mode = mode
self.cb_style = cb_style
self.links = {}
self.latex = {}
self._current_word = ""
# --- undo/redo
self._undo_stack = [[]]
self._redo_stack = []
size = CONFIG.get("Font", "text_size")
font_text = "%s %s" % (CONFIG.get("Font", "text_family").replace(" ", "\ "), size)
mono = "%s %s" % (CONFIG.get("Font", "mono").replace(" ", "\ "), size)
self.configure(font=font_text)
# --- tags
self.tag_configure("mono", font=mono)
self.tag_configure("bold", font="%s bold" % font_text)
self.tag_configure("italic", font="%s italic" % font_text)
self.tag_configure("bold-italic", font="%s bold italic" % font_text)
try: # only >= tk8.6.6 support selectforeground
self.tag_configure("underline", underline=True,
selectforeground="white")
self.tag_configure("overstrike", overstrike=True,
selectforeground="white")
self.tag_configure("link", foreground="blue", underline=True,
selectforeground="white")
self.tag_configure("file", foreground="blue", underline=True,
selectforeground="white")
for coul in TEXT_COLORS.values():
self.tag_configure(coul, foreground=coul,
selectforeground="white")
self.tag_configure(coul + "-underline", foreground=coul,
selectforeground="white", underline=True)
self.tag_configure(coul + "-overstrike", foreground=coul,
overstrike=True, selectforeground="white")
except TclError:
self.tag_configure("underline", underline=True)
self.tag_configure("overstrike", overstrike=True)
self.tag_configure("link", foreground="blue", underline=True)
self.tag_configure("file", foreground="blue", underline=True)
for coul in TEXT_COLORS.values():
self.tag_configure(coul, foreground=coul)
self.tag_configure(coul + "-underline", foreground=coul,
underline=True)
self.tag_configure(coul + "-overstrike", foreground=coul,
overstrike=True)
self.tag_configure("center", justify="center")
self.tag_configure("left", justify="left")
self.tag_configure("right", justify="right")
self.tag_configure("list", lmargin1=0, lmargin2=21,
tabs=(10, 'right', 21, 'left'))
self.tag_configure("todolist", lmargin1=0, lmargin2=21,
tabs=(10, 'right', 21, 'left'))
margin = 2 * Font(self, font=font_text).measure("m")
self.tag_configure("enum", lmargin1=0, lmargin2=margin + 5,
tabs=(margin, 'right', margin + 5, 'left'))
# --- bindings
self.bind('<Key>', self._on_keypress)
self.bind('<Control-Key>', self._on_ctrl_keypress)
self.bind('<Control-z>', self.undo)
self.bind('<Control-y>', self.redo)
self.bind_class('Text', '<Control-y>', lambda e: None)
self.tag_bind("link", "<Enter>",
lambda event: self.configure(cursor="hand1"))
self.tag_bind("link", "<Leave>",
lambda event: self.configure(cursor=""))
def update_font(self):
"""Update font after configuration change."""
size = CONFIG.get("Font", "text_size")
font = "%s %s" % (CONFIG.get("Font", "text_family").replace(" ", "\ "),
size)
mono = "%s %s" % (CONFIG.get("Font", "mono").replace(" ", "\ "), size)
self.configure(font=font)
self.tag_configure("mono", font=mono)
self.tag_configure("bold", font="%s bold" % font)
self.tag_configure("italic", font="%s italic" % font)
self.tag_configure("bold-italic", font="%s bold italic" % font)
margin = 2 * Font(self, font=font).measure("m")
self.tag_configure("enum", lmargin1=0, lmargin2=margin + 5,
tabs=(margin, 'right', margin + 5, 'left'))
def mode_change(self, new_mode):
self._undo_stack[-1].append(('mode', self.mode, new_mode))
self.mode = new_mode
def cb_state_change(self, cb, new_state):
self.add_undo_sep()
self._undo_stack[-1].append(('checkbox_state', self.index(cb), new_state))
self.add_undo_sep()
def undo(self, event=None):
if self.cget("state") != "disabled":
try:
items = []
# skip empty sets
while not items:
items = self._undo_stack.pop()
except IndexError:
# empty stack
self._undo_stack.append([])
else:
self._redo_stack.append(items)
for item in reversed(items):
self._undo_single(item)
if not self._undo_stack:
self._undo_stack.append([])
return "break"
def redo(self, event=None):
if self.cget("state") != "disabled":
try:
items = self._redo_stack.pop()
except IndexError:
# empty stack
pass
else:
self._undo_stack.append(items)
for item in items:
self._redo_single(item)
return "break"
def add_undo_sep(self):
if self._undo_stack[-1]:
self._undo_stack.append([])
self._redo_stack.clear()
def _undo_single(self, item):
if 'insert_' in item[0]:
self.delete(item[1])
elif item[0] == 'insert':
self.delete(item[1], item[2])
elif item[0] == 'link':
self.links[item[1]] = item[2]
elif item[0] == 'delete':
self._restore_text_with_prop(item[1], item[3])
elif item[0] == 'paste':
self.delete(item[1], item[2])
elif item[0] == 'tag_remove':
self.tag_add(*item[1:])
elif item[0] == 'tag_add':
self.tag_remove(item[1], item[2], *item[3])
elif item[0] == 'mode':
self.mode = item[1]
self.master.mode.set(item[1])
elif item[0] == 'checkbox_state':
win = self.window_cget(item[1], 'window')
if item[2]:
self.nametowidget(win).state(('!selected', '!alternate'))
else:
self.nametowidget(win).state(('selected', '!alternate'))
def _redo_single(self, item):
if item[0] == 'insert_char':
self.insert(item[1], item[2])
elif item[0] == 'insert_image':
self.image_create(item[1], item[2])
elif item[0] == 'insert_latex':
index, kw, img_name, latex = item[1:]
self.latex[img_name] = latex
self.image_create(index, **kw)
elif item[0] == 'insert_checkbox':
self.checkbox_create(item[1], item[2])
elif item[0] == 'insert':
self.insert(item[1], item[3], *item[4])
if self.mode != "note":
self.tag_add(self.mode, "1.0", "end")
elif item[0] == 'link':
self.links[item[1]] = item[3]
elif item[0] == 'delete':
self.delete(item[1], item[2])
elif item[0] == 'paste':
self._restore_text_with_prop(item[1], item[3])
elif item[0] == 'tag_remove':
self.tag_remove(*item[1:])
elif item[0] == 'tag_add':
self.tag_add(item[1], item[2], *item[3])
elif item[0] == 'mode':
self.mode = item[2]
self.master.mode.set(item[2])
elif item[0] == 'checkbox_state':
win = self.window_cget(item[1], 'window')
if item[2]:
self.nametowidget(win).state(('selected', '!alternate'))
else:
self.nametowidget(win).state(('!selected', '!alternate'))
def checkbox_create(self, index, state=('!alternate',), **kw):
kw2 = kw.copy()
kw2['takefocus'] = False
kw2['style'] = self.cb_style
ch = Checkbox(self, **kw2)
ch.state(state)
self.window_create(index, window=ch)
def checkbox_create_undoable(self, index, state=('!alternate',)):
self._undo_stack[-1].append(('insert_checkbox', self.index(index), state))
self._redo_stack.clear()
ch = Checkbox(self, takefocus=False, style=self.cb_style)
ch.state(state)
self.window_create(index, window=ch)
def image_create_undoable(self, index, **kw):
self._undo_stack[-1].append(('insert_image', self.index(index), kw))
self._redo_stack.clear()
self.image_create(index, **kw)
def link_create_undoable(self, link_nb, link):
self._undo_stack[-1].append(('link', link_nb, self.links.get(link_nb), link))
self._redo_stack.clear()
self.links[link_nb] = link
def latex_create_undoable(self, index, img_name, image, latex):
"""Insert image generated from latex expression given in the entry."""
im = os.path.join(PATH_LATEX, img_name)
kw = dict(align='bottom', image=image, name=im)
self._undo_stack[-1].append(('insert_latex', self.index(index), kw, img_name, latex))
self._redo_stack.clear()
self.latex[img_name] = latex
self.image_create(index, **kw)
def tag_remove_undoable(self, tagName, index1, index2=None):
self._undo_stack[-1].append(('tag_remove', tagName, self.index(index1),
self.index(index2)))
self.tag_remove(tagName, index1, index2)
def tag_add_undoable(self, tagName, index1, *args):
self._undo_stack[-1].append(('tag_add', tagName, self.index(index1), [self.index(i) for i in args]))
self.tag_add(tagName, index1, *args)
def _on_ctrl_keypress(self, event):
pass
def delete_undoable(self, index1, index2=None):
index1 = self.index(index1)
if index2 is None:
index2 = self.index('{}+1c'.format(index1))
else:
index2 = self.index(index2)
self._undo_stack[-1].append(('delete', index1, index2,
self._copy_text(index1, index2)))
self.delete(index1, index2)
def insert_undoable(self, index, chars, *args):
index1 = self.index(index)
self.insert(index, chars, *args)
index2 = self.index('{}+{}c'.format(index1, len(chars)))
self._undo_stack[-1].append(('insert', index1, index2, chars, args))
def _auto_word_replacement(self):
if self._current_word == self.get('insert-%ic' % len(self._current_word), 'insert'):
replacement = AUTOCORRECT.get(self._current_word)
if replacement is not None:
self.add_undo_sep()
self.delete_undoable('insert-%ic' % len(self._current_word), 'insert')
self.insert_undoable('insert', replacement)
self.add_undo_sep()
self._current_word = ""
def _on_keypress(self, event):
# --- deletion
if event.keysym == 'BackSpace':
self._redo_stack.clear()
self._current_word = ""
self.add_undo_sep()
deb_line = self.get("insert linestart", "insert")
tags = self.tag_names("insert")
if self.tag_ranges("sel"):
if self.tag_nextrange("enum", "sel.first", "sel.last"):
update = True
else:
update = False
self.delete_undoable("sel.first", "sel.last")
if update:
self.update_enum()
elif self.index("insert") != "1.0":
if re.match('^\t[0-9]+\.\t$', deb_line) and 'enum' in tags:
self.delete_undoable("insert linestart", "insert")
self.insert_undoable("insert", "\t\t")
self.update_enum()
elif deb_line == "\t•\t" and 'list' in tags:
self.delete_undoable("insert linestart", "insert")
self.insert_undoable("insert", "\t\t")
elif deb_line == "\t\t":
self.delete_undoable("insert linestart", "insert")
elif "todolist" in tags and self.index("insert") == self.index("insert linestart+1c"):
try:
ch = self.window_cget("insert-1c", "window")
self.delete_undoable("insert-1c")
self.children[ch.split('.')[-1]].destroy()
self.insert_undoable("insert", "\t\t")
except TclError:
self.delete_undoable("insert-1c")
else:
self.delete_undoable("insert-1c")
self.add_undo_sep()
return 'break'
elif event.keysym == 'Delete':
self._redo_stack.clear()
self._current_word = ""
sel = self.tag_ranges('sel')
if sel:
self.add_undo_sep()
self._undo_stack[-1].append(('delete', sel[0], sel[1],
self._copy_text(*sel)))
self.add_undo_sep()
# --- newline
elif event.keysym == 'Return':
self._redo_stack.clear()
self._auto_word_replacement()
if self.mode == "list":
self.add_undo_sep()
self.insert_undoable("insert", "\n\t•\t")
self.tag_add("list", "1.0", "end")
self.add_undo_sep()
elif self.mode == "todolist":
self.add_undo_sep()
self.insert_undoable("insert", "\n")
self.checkbox_create_undoable("insert", ('!alternate',))
self.tag_add("todolist", "1.0", "end")
self.add_undo_sep()
elif self.mode == "enum":
self.add_undo_sep()
self.insert_undoable("insert", "\n\t0.\t")
self.update_enum()
self.add_undo_sep()
else:
self.insert_undoable("insert", "\n")
self.add_undo_sep()
return 'break'
# --- normal char
elif event.char != '':
self._redo_stack.clear()
char = event.char
self._current_word += char
sel = self.tag_ranges('sel')
if sel:
self.add_undo_sep()
self._undo_stack[-1].append(('delete', sel[0], sel[1],
self._copy_text(*sel)))
self.add_undo_sep()
self._undo_stack[-1].append(('insert_char', sel[0], char))
else:
self._undo_stack[-1].append(('insert_char', self.index('insert'), char))
if event.keysym in ['space', 'Tab']:
self._current_word = self._current_word[:-1]
self._auto_word_replacement()
self.add_undo_sep()
def _copy_text(self, index1, index2):
"""Copy text, images, checkboxes with the formatting between index1 and index2."""
content = []
deb = sorting(str(index1))
fin = sorting(str(index2))
for l in range(deb[0], fin[0] + 1):
if l == deb[0]:
dc = deb[1]
else:
dc = 0
if l == fin[0]:
nc = fin[1]
else:
nc = sorting(str(self.index('%i.end' % l)))[1]
for c in range(dc, nc):
index = '%i.%i' % (l, c)
try:
keys = ['name', 'image', 'align', 'padx', 'pady']
kw = {k: self.image_cget(index, k) for k in keys}
tags = self.tag_names(index)
i = 0
while i < len(tags) and not re.match(r'[0-9]+\.png', tags[i]):
i += 1
if i < len(tags):
latex = self.latex[tags[i]]
content.append(('latex', kw, tags, tags[i], latex))
else:
content.append(('image', kw, tags))
except TclError:
try:
win = self.nametowidget(self.window_cget(index, 'window'))
state = win.state()
tags = self.tag_names(index)
content.append(('checkbox', state, tags))
except TclError:
tags = self.tag_names(index)
content.append(('char', self.get(index), tags))
if l < fin[0]:
content.append(('char', '\n', []))
return content
def _restore_text_with_prop(self, index1, content):
"""Restore text, images, checkboxes and formatting at index1."""
self.mark_set('insert', index1)
for c in content:
index = self.index('insert')
if c[0] == 'image':
self.image_create(index, **c[1])
elif c[0] == 'latex':
self.image_create(index, **c[1])
self.latex[c[3]] = c[4]
elif c[0] == 'checkbox':
self.checkbox_create(index, c[1])
self.update_idletasks()
else:
self.insert('insert', c[1])
for tag in c[2]:
self.tag_add(tag, index)
self.tag_remove('sel', '1.0', 'end')
# --- Text style
def toggle_text_style(self, style):
"""Toggle the style of the selected text."""
if self.tag_ranges("sel"):
current_tags = self.tag_names("sel.first")
self.add_undo_sep()
# remove tag
if style in current_tags:
# first char is in style so 'unstyle' the range
tag_ranges = text_ranges(self, style, "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable(style, d, f)
tag_ranges = text_ranges(self, "bold-italic", "sel.first", "sel.last")
style2 = "bold" if style == "italic" else "italic"
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable("bold-italic", d, f)
self.tag_add_undoable(style2, d, f)
elif style == "bold" and "bold-italic" in current_tags:
tag_ranges = text_ranges(self, "bold-italic", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable("bold-italic", d, f)
self.tag_add_undoable("italic", d, f)
tag_ranges = text_ranges(self, "bold", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable("bold", d, f)
elif style == "italic" and "bold-italic" in current_tags:
tag_ranges = text_ranges(self, "bold-italic", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable("bold-italic", d, f)
self.tag_add_undoable("bold", d, f)
tag_ranges = text_ranges(self, "italic", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable("italic", d, f)
# add tag
elif style == "bold":
self.tag_add_undoable("bold", "sel.first", "sel.last")
tag_ranges = text_ranges(self, "italic", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_add_undoable("bold-italic", d, f)
self.tag_remove_undoable("italic", d, f)
self.tag_remove_undoable("bold", d, f)
elif style == "italic":
self.tag_add_undoable("italic", "sel.first", "sel.last")
tag_ranges = text_ranges(self, "bold", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_add_undoable("bold-italic", d, f)
self.tag_remove_undoable("italic", d, f)
self.tag_remove_undoable("bold", d, f)
else:
self.tag_add_undoable(style, "sel.first", "sel.last")
self.add_undo_sep()
def toggle_underline(self):
"""Toggle underline property of the selected text."""
if self.tag_ranges("sel"):
current_tags = self.tag_names("sel.first")
self.add_undo_sep()
if "underline" in current_tags:
# first char is in style so 'unstyle' the range
tag_ranges = text_ranges(self, "underline", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable("underline", d, f)
for coul in TEXT_COLORS.values():
tag_ranges = text_ranges(self, coul + "-underline", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable(coul + "-underline", d, f)
else:
self.tag_add_undoable("underline", "sel.first", "sel.last")
for coul in TEXT_COLORS.values():
r = text_ranges(self, coul, "sel.first", "sel.last")
if r:
for deb, fin in zip(r[::2], r[1::2]):
self.tag_add_undoable(coul + "-underline", deb, fin)
self.add_undo_sep()
def toggle_overstrike(self):
"""Toggle overstrike property of the selected text."""
if self.tag_ranges("sel"):
current_tags = self.tag_names("sel.first")
self.add_undo_sep()
if "overstrike" in current_tags:
# first char is in style so 'unstyle' the range
tag_ranges = text_ranges(self, "overstrike", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable("overstrike", d, f)
for coul in TEXT_COLORS.values():
tag_ranges = text_ranges(self, coul + "-overstrike", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable(coul + "-overstrike", d, f)
else:
self.tag_add_undoable("overstrike", "sel.first", "sel.last")
for coul in TEXT_COLORS.values():
r = text_ranges(self, coul, "sel.first", "sel.last")
if r:
for deb, fin in zip(r[::2], r[1::2]):
self.tag_add_undoable(coul + "-overstrike", deb, fin)
self.add_undo_sep()
def change_sel_color(self, color):
"""Change the color of the selection."""
if self.tag_ranges("sel"):
self.add_undo_sep()
for coul in TEXT_COLORS.values():
tag_ranges = text_ranges(self, coul, "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable(coul, d, f)
tag_ranges = text_ranges(self, coul + "-overstrike", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable(coul + "-overstrike", d, f)
tag_ranges = text_ranges(self, coul + "-underline", "sel.first", "sel.last")
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable(coul + "-underline", d, f)
if not color == "black":
self.tag_add_undoable(color, "sel.first", "sel.last")
underline = text_ranges(self, "underline", "sel.first", "sel.last")
overstrike = text_ranges(self, "overstrike", "sel.first", "sel.last")
for deb, fin in zip(underline[::2], underline[1::2]):
self.tag_add_undoable(color + "-underline", deb, fin)
for deb, fin in zip(overstrike[::2], overstrike[1::2]):
self.tag_add_undoable(color + "-overstrike", deb, fin)
self.add_undo_sep()
def set_align(self, alignment):
"""Align the text according to alignment (left, right, center)."""
if self.tag_ranges("sel"):
deb = self.index("sel.first linestart")
fin = self.index("sel.last lineend")
else:
deb = self.index("insert linestart")
fin = self.index("insert lineend")
if "\t" not in self.get(deb, fin):
self.add_undo_sep()
# tabulations don't support right/center alignment
# remove old alignment tag
for align in ['left', 'right', 'center']:
if align != alignment:
tag_ranges = text_ranges(self, align, deb, fin)
for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
self.tag_remove_undoable(align, d, f)
# set new alignment tag
self.tag_add_undoable(alignment, deb, fin)
self.add_undo_sep()
def update_enum(self):
"""Update enumeration numbers."""
lines = self.get("1.0", "end").splitlines()
indexes = []
for i, l in enumerate(lines):
res = re.match('^\t[0-9]+\.\t', l)
res2 = re.match('^\t[0-9]+\.', l)
if res:
indexes.append((i, res.end()))
elif res2:
indexes.append((i, res2.end()))
for j, (i, end) in enumerate(indexes):
self.delete_undoable("%i.0" % (i + 1), "%i.%i" % (i + 1, end))
self.insert_undoable("%i.0" % (i + 1), "\t%i.\t" % (j + 1))
self.tag_add("enum", "1.0", "end")
self.add_undo_sep()
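# Illustrative usage sketch: everything recorded between two add_undo_sep()
# calls is undone/redone as a single step. `text` is assumed to be an existing
# MyText instance.
def _example_undo_grouping(text):
    text.add_undo_sep()
    text.insert_undoable("insert", "hello ")
    text.insert_undoable("insert", "world")
    text.add_undo_sep()
    text.undo()   # removes both insertions in one step
    text.redo()   # restores them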
| gpl-3.0 | 8,432,323,397,572,255,000 | 44.091493 | 108 | 0.504592 | false | 3.782416 | true | false | false |
ideascf/octopus | proto/service_proto.py | 1 | 3222 |
# coding=utf-8
import traceback
import etcd
import logging
import json
from octopus import err
from octopus import constant
from octopus.util import tools
log = logging.getLogger(constant.LOGGER_NAME)
def register(ec, service_name, service_info):
"""
    :param ec: etcd client object
:param service_name:
:param service_info:
:type ec: etcd.Client
:type service_name: str
:type service_info: dict
:return:
:rtype: str
"""
add_info = service_info.get('addr')
if not add_info\
or not {'host', 'port'}.issubset(add_info.keys()):
raise err.OctpParamError('service_addr must contain "host" and "port".')
result = ec.write(
tools.service_dir_name(service_name),
json.dumps(service_info),
append=True,
ttl=constant.SERVICE_TTL
)
log.debug('new key: %s', result.key)
return result.key
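# Illustrative sketch (placeholder values): the minimal service_info accepted
# by register() above; the "addr" entry must carry "host" and "port".
def _example_register(ec):
    info = {'addr': {'host': '127.0.0.1', 'port': 8080}}
    return register(ec, 'demo-service', info)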
def unregister(ec, service_token):
"""
:param ec:
:param service_token:
:type ec: etcd.Client
:type service_token: str
    :return: whether the operation succeeded
:rtype: bool
"""
try:
ec.delete(service_token)
except Exception:
        # TODO improve the exception handling here
log.warn('Unregister service failed. err: %s', traceback.format_exc())
return False
else:
return True
def watch(ec, service_name, timeout=None):
"""
    Watch for changes to a service cluster.
:param ec:
:param service_name:
:param timeout:
:type ec: etcd.Client
:type service_name: str
:type timeout: float
:return:
"""
return ec.watch(tools.service_dir_name(service_name), timeout=timeout, recursive=True)
def watch_locker(ec, service_locker_key, timeout=None):
"""
watch locker's change.
:param ec:
:param service_locker_key:
:param timeout:
:type ec: etcd.Client
:type service_locker_key: str
:type timeout: float
:return:
"""
return ec.watch(service_locker_key, timeout=timeout, recursive=True)
def get(ec, service_name):
"""
:param ec:
:param service_name:
:type ec: etcd.Client
:return:
"""
try:
result = ec.get(tools.service_dir_name(service_name))
except etcd.EtcdKeyNotFound:
        raise err.OctpServiceNotFoundError('Can NOT find service(%s) from etcd' % service_name)
else:
return result
def locker(ec, service_name):
"""
:param ec:
:param service_name:
:type ec: etcd.Client
:return:
"""
return etcd.Lock(ec, tools.locker_name(service_name))
def alive(ec, service_name, service_token):
"""
:param ec:
:param service_name:
:param service_token:
:type ec: etcd.Client
:return:
"""
# this way, not upload parameter 'refresh', so can't only refresh ttl.
# return ec.write(
# service_token,
# None,
# ttl=constant.SERVICE_TTL,
# refresh=True,
# prevExist=True, # refresh and prevExist, can refresh ttl only.
# )
return ec.api_execute(
'/v2/keys/' + service_token,
ec._MPUT,
{
'refresh': True,
'prevExist': True,
'ttl': constant.SERVICE_TTL,
}
)
    )
| mit | 889,545,740,744,177,000 | 20.066667 | 94 | 0.601582 | false | 3.326316 | false | false | false
LCAS/spqrel_tools | slu4p/speech_to_text/asr.py | 1 | 8115 |
#http://doc.aldebaran.com/2-5/naoqi/audio/alspeechrecognition-api.html
import signal
import qi
import argparse
import sys
from os.path import expanduser
import os
import time
from conditions import set_condition
class SpeechRecognition(object):
USE_GOOGLE = True
CHANNELS = [0, 0, 1, 0]
audio_recorder = None
recording = False
def __init__(self, vocab, app):
super(SpeechRecognition, self).__init__()
app.start()
self.session = app.session
self.__shutdown_requested = False
signal.signal(signal.SIGINT, self.signal_handler)
#Starting services
self.asr_service = self.session.service("ALSpeechRecognition")
self.asr_service.setLanguage("English")
self.audio_recorder = self.session.service("ALAudioRecorder")
self.memory_service = self.session.service("ALMemory")
#establishing test vocabulary
#vocabulary = ["yes", "no", "please", "hello", "goodbye", "hi, there", "go to the kitchen"]
with open(vocab) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
vocabulary = [x.strip() for x in content]
print "Vocabulary read", vocabulary
self.asr_service.pause(True)
#self.asr_service.removeAllContext()
try:
self.asr_service.setVocabulary(vocabulary, False)
#self.asr_service.setParameter("Sensitivity", 0.1)
self.asr_service.setParameter("NbHypotheses", 3)
self.asr_service.setAudioExpression(False)
except:
print "error setting vocabulary"
self.asr_service.pause(False)
def start(self):
# Start the speech recognition engine with user Test_ASR
self.subscribe_name = "Test_ASR" + str(time.time())
self.asr_service.subscribe(self.subscribe_name)
#print 'Speech recognition engine started'
#subscribe to event WordRecognized
self.subWordRecognized = self.memory_service.subscriber("WordRecognized")
#self.idSubWordRecognized = self.subWordRecognized.signal.connect(self.onWordRecognized)
# speech detected
self.subSpeechDet = self.memory_service.subscriber("SpeechDetected")
#self.idSubSpeechDet = self.subSpeechDet.signal.connect(self.onSpeechDetected)
# enable
self.subEnable = self.memory_service.subscriber("ASR_enable")
self.idSubEnable = self.subEnable.signal.connect(self.onEnable)
#subscribe to google asr transcription
#if self.USE_GOOGLE:
#self.audio_recorder.stopMicrophonesRecording()
#self.googleAsrRecognized = self.memory_service.subscriber("GoogleAsrRecognized")
#self.idGoogleAsrRecognized = self.googleAsrRecognized.signal.connect(self.onGoogleASR)
#self.audio_recorder.startMicrophonesRecording("utterance" + ".wav", "wav", 44100, [1, 1, 1, 1])
#print 'Audio recorder engine started'
        self.idSubWordRecognized = None
        self.idSubSpeechDet = None
        self.is_enabled = False
def quit(self):
#Disconnecting callbacks and subscribers
self.asr_service.unsubscribe(self.subscribe_name)
if self.idSubWordRecognized is not None:
self.subWordRecognized.signal.disconnect(self.idSubWordRecognized)
if self.idSubSpeechDet is not None:
self.subSpeechDet.signal.disconnect(self.idSubSpeechDet)
if self.idSubEnable is not None:
self.subEnable.signal.disconnect(self.idSubEnable)
#if self.USE_GOOGLE:
# self.googleAsrRecognized.signal.disconnect(self.idGoogleAsrRecognized)
def signal_handler(self, signal, frame):
print "[" + self.__class__.__name__ + "] Caught Ctrl+C, stopping."
self.__shutdown_requested = True
print "[" + self.__class__.__name__ + "] Good-bye"
def onSpeechDetected(self, value):
print "speechdetected=", value
if value == 1:
if self.USE_GOOGLE:
if not self.recording:
#try:
# self.AUDIO_FILE_DIR = self.memory_proxy.getData("NAOqibag/CurrentLogFolder") + "/asr_logs/"
#except:
self.AUDIO_FILE_DIR = expanduser('~') + '/bags/no_data/asr_logs/'
if not os.path.exists(self.AUDIO_FILE_DIR):
os.makedirs(self.AUDIO_FILE_DIR)
self.AUDIO_FILE_PATH = self.AUDIO_FILE_DIR + 'SPQReL_mic_'
#self.audio_recorder.stopMicrophonesRecording()
self.AUDIO_FILE = self.AUDIO_FILE_PATH + str(time.time())
self.audio_recorder.startMicrophonesRecording(self.AUDIO_FILE + ".wav", "wav", 44100, self.CHANNELS)
self.recording = True
print "Audio recorder started recording"
def onWordRecognized(self, value):
print "value=",value
if self.USE_GOOGLE:
if self.recording:
self.audio_recorder.stopMicrophonesRecording()
self.recording = False
print "Audio recorder stopped recording"
self.memory_service.raiseEvent("GoogleRequest", self.AUDIO_FILE)
        for i, val in enumerate(value):
            if val in ["stop", "stop following", "don't follow", "stop following me"]:
                if i + 1 < len(value) and value[i + 1] > 0.4:
                    set_condition(self.memory_service, "stopfollowing", "true")
#self.audio_recorder.stopMicrophonesRecording()
#print "Audio recorder stopped recording"
#if self.USE_GOOGLE:
# self.memory_service.raiseEvent("GoogleRequest", self.AUDIO_FILE)
#def onGoogleASR(self, value):
# print "googleasr=", value
def onEnable(self, value):
print "enable=", value
if value == "0":
if self.is_enabled:
self.is_enabled = False
if self.USE_GOOGLE:
self.audio_recorder.stopMicrophonesRecording()
if self.subWordRecognized is not None:
self.subWordRecognized.signal.disconnect(self.idSubWordRecognized)
if self.subSpeechDet is not None:
self.subSpeechDet.signal.disconnect(self.idSubSpeechDet)
print "ASR disabled"
else:
print "ASR already disabled"
else:
if not self.is_enabled:
self.is_enabled = True
self.idSubWordRecognized = self.subWordRecognized.signal.connect(self.onWordRecognized)
self.idSubSpeechDet = self.subSpeechDet.signal.connect(self.onSpeechDetected)
# TODO move it here!!
#self.subscribe(
# event=SpeechRecognition.WR_EVENT,
# callback=self.word_recognized_callback
#)
print "ASR enabled"
else:
print "ASR already enabled"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--pip", type=str, default=os.environ['PEPPER_IP'],
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--pport", type=int, default=9559,
help="Naoqi port number")
parser.add_argument("--vocab", type=str, default="resources/nuance_dictionary.txt",
help="The nuance vocabulary")
args = parser.parse_args()
pip = args.pip
pport = args.pport
vocab = args.vocab
#Starting application
try:
connection_url = "tcp://" + pip + ":" + str(pport)
app = qi.Application(["asr", "--qi-url=" + connection_url ])
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + pip + "\" on port " + str(pport) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
sr = SpeechRecognition(
vocab=vocab,
app=app
)
sr.start()
#let it run
app.run()
sr.quit()
if __name__ == "__main__":
main()
| mit | 2,345,651,052,511,214,000 | 37.278302 | 120 | 0.60345 | false | 3.899568 | false | false | false |
vgonisanz/wpm | examples/toggle_table_sample.py | 1 | 1652 |
# -*- coding: utf-8 -*-
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src/elements'))
from curses import wrapper # Use my own wrapper
from wpm import Wpm
from toggle_table import ToggleTable
# Configuration
toggletable_width = 30
toggletable_height = 10
toggletable_x0 = 1
toggletable_y0 = 2
# Variables
wpm = None
background = None
def initialize():
global wpm
global background
wpm = Wpm(True)
wpm.logger.info("Starting %s" % os.path.basename(__file__))
background = wpm.get_screen() # Get main window to print
return None
def draw_toggletable():
background.print_message("This is a toggle table:")
toggletable = ToggleTable(toggletable_width, toggletable_height, toggletable_x0, toggletable_y0)
toggletable.set(0, 0)
toggletable.set(2, 3)
toggletable.set(2, 4)
toggletable.draw()
#toggletable.print_border_type()
background.waitforkey()
return None
def draw_random_toggletable():
background.clear()
background.print_message("This is a toggle table generated randomly")
toggletable = ToggleTable(toggletable_width, toggletable_height, toggletable_x0, toggletable_y0)
toggletable.generate_random_table()
toggletable.draw()
background.waitforkey()
return None
def run_toggletable_widget():
return None
def main(stdscr):
initialize()
draw_toggletable()
draw_random_toggletable()
run_toggletable_widget()
return None
if __name__ == "__main__":
wrapper(main)
print("Thanks for use %s" % os.path.basename(__file__))
| lgpl-3.0 | 7,595,882,164,654,035,000 | 24.030303 | 100 | 0.688862 | false | 3.434511 | false | false | false |
drawquest/drawquest-web | website/drawquest/apps/palettes/forms.py | 1 | 3323 |
from itertools import chain
from django import forms
from django.forms.util import flatatt
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from drawquest.apps.palettes.models import Color, ColorPack
# Widget rendering code forked from django.forms.widgets
class ColorCheckboxInput(forms.CheckboxInput):
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
return format_html('<input{0} />', flatatt(final_attrs))
class ColorsSelect(forms.CheckboxSelectMultiple):
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
output = ['<ul>']
# Normalize to strings
str_values = set([force_text(v) for v in value])
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
label_for = format_html(' for="{0}"', final_attrs['id'])
else:
label_for = ''
cb = ColorCheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = force_text(option_value)
rendered_cb = cb.render(name, option_value)
option_label = force_text(option_label)
output.append(format_html('<li><label{0}>{1} {2}</label></li>',
label_for, rendered_cb, option_label))
output.append('</ul>')
return mark_safe('\n'.join(output))
def label_from_color_instance(obj):
return mark_safe('<div class="color_option_swatch" style="background-color:rgb({red}, {green}, {blue})" title="{label}"><div class="color_option_swatch_inner"></div></div>'.format(red=obj.red, green=obj.green, blue=obj.blue, label=obj.label))
class ColorPackForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ColorPackForm, self).__init__(*args, **kwargs)
self.fields['colors'].label_from_instance = label_from_color_instance
self.fields['colors'].queryset = Color.includable_in_color_pack()
class Meta(object):
model = ColorPack
exclude = ['id', 'owners', 'legacy_palette_name']
widgets = {
'ordinal': forms.TextInput(attrs={'class': 'ordinal'}),
'colors': ColorsSelect(),
}
class ColorForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ColorForm, self).__init__(*args, **kwargs)
class Meta(object):
model = Color
exclude = ['id', 'owners']
widgets = {
'ordinal': forms.TextInput(attrs={'class': 'ordinal'}),
}
        }
| bsd-3-clause | -7,076,176,437,245,281,000 | 39.036145 | 246 | 0.609991 | false | 3.832757 | false | false | false
elbeardmorez/quodlibet | quodlibet/quodlibet/ext/gstreamer/karaoke.py | 1 | 4312 |
# -*- coding: utf-8 -*-
# Copyright 2012 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gst, Gtk, GObject
from quodlibet import _
from quodlibet.plugins import PluginImportException
from quodlibet.plugins.gstelement import GStreamerPlugin
from quodlibet import qltk
from quodlibet import config
from quodlibet.qltk import Icons
_PLUGIN_ID = "karaoke"
_SETTINGS = {
"band": [_("Filter _band:"),
_("The Frequency band of the filter"), 220.0],
"width": [_("Filter _width:"),
_("The Frequency width of the filter"), 100.0],
"level": [_("_Level:"), _("Level of the effect"), 1.0],
}
def get_cfg(option):
cfg_option = "%s_%s" % (_PLUGIN_ID, option)
default = _SETTINGS[option][2]
return config.getfloat("plugins", cfg_option, default)
def set_cfg(option, value):
cfg_option = "%s_%s" % (_PLUGIN_ID, option)
if get_cfg(option) != value:
config.set("plugins", cfg_option, value)
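# Note (illustrative): with _PLUGIN_ID = "karaoke", get_cfg("level") reads the
# "plugins" option "karaoke_level" and falls back to the default declared in
# _SETTINGS (1.0 for "level").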
class Preferences(Gtk.VBox):
__gsignals__ = {
'changed': (GObject.SignalFlags.RUN_LAST, None, tuple()),
}
def __init__(self):
super(Preferences, self).__init__(spacing=12)
table = Gtk.Table(n_rows=3, n_columns=2)
table.set_col_spacings(6)
table.set_row_spacings(6)
labels = {}
for idx, key in enumerate(["level", "band", "width"]):
label = Gtk.Label(label=_SETTINGS[key][0])
labels[key] = label
label.set_alignment(0.0, 0.5)
label.set_padding(0, 6)
label.set_tooltip_text(_SETTINGS[key][1])
label.set_use_underline(True)
table.attach(label, 0, 1, idx, idx + 1,
xoptions=Gtk.AttachOptions.FILL |
Gtk.AttachOptions.SHRINK)
def scale_changed(scale, option):
value = scale.get_value()
set_cfg(option, value)
self.emit("changed")
max_values = [1.0, 441, 100]
steps = [0.01, 10, 10]
pages = [0.1, 50, 25]
scales = {}
for idx, key in enumerate(["level", "band", "width"]):
max_value = max_values[idx]
step = steps[idx]
page = pages[idx]
scale = Gtk.HScale(
adjustment=Gtk.Adjustment.new(0, 0, max_value, step, page, 0))
scales[key] = scale
if step < 0.1:
scale.set_digits(2)
scale.add_mark(_SETTINGS[key][2], Gtk.PositionType.BOTTOM, None)
labels[key].set_mnemonic_widget(scale)
scale.set_value_pos(Gtk.PositionType.RIGHT)
table.attach(scale, 1, 2, idx, idx + 1)
scale.connect('value-changed', scale_changed, key)
scale.set_value(get_cfg(key))
def format_perc(scale, value):
return _("%d %%") % (value * 100)
scales["level"].connect('format-value', format_perc)
def format_hertz(scale, value):
return _("%d Hz") % value
scales["band"].connect('format-value', format_hertz)
scales["width"].connect('format-value', format_hertz)
self.pack_start(qltk.Frame(_("Preferences"), child=table),
True, True, 0)
class Karaoke(GStreamerPlugin):
PLUGIN_ID = _PLUGIN_ID
PLUGIN_NAME = _("Karaoke")
PLUGIN_DESC = _("Removes main vocals from audio.")
PLUGIN_ICON = Icons.AUDIO_INPUT_MICROPHONE
@classmethod
def setup_element(cls):
return Gst.ElementFactory.make('audiokaraoke', cls.PLUGIN_ID)
@classmethod
def update_element(cls, element):
element.set_property("level", get_cfg("level"))
element.set_property("filter-band", get_cfg("band"))
element.set_property("filter-width", get_cfg("width"))
@classmethod
def PluginPreferences(cls, window):
prefs = Preferences()
prefs.connect("changed", lambda *x: cls.queue_update())
return prefs
if not Karaoke.setup_element():
raise PluginImportException(
"GStreamer element 'audiokaraoke' missing (gst-plugins-good)")
| gpl-2.0 | -7,045,133,110,099,888,000 | 32.426357 | 78 | 0.590445 | false | 3.531532 | false | false | false |
jiadaizhao/LeetCode | 0501-0600/0567-Permutation in String/0567-Permutation in String.py | 1 | 1393 |
import collections
class Solution:
def checkInclusion(self, s1: str, s2: str) -> bool:
if len(s2) < len(s1):
return False
table = collections.Counter(s1)
count = len(table)
start = 0
for i in range(len(s2)):
table[s2[i]] -= 1
if table[s2[i]] == 0:
count -= 1
if count == 0 and i - start + 1 == len(s1):
return True
while count == 0:
table[s2[start]] += 1
if table[s2[start]] == 1:
count += 1
elif i - start == len(s1):
return True
start += 1
return False
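# Illustrative sanity check (sketch): the classic cases for this problem.
def _example_cases():
    sol = Solution()
    return (sol.checkInclusion("ab", "eidbaooo"),   # True: "ba" occurs in s2
            sol.checkInclusion("ab", "eidboaoo"))   # False: no permutation of "ab"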
class Solution2:
def checkInclusion(self, s1: str, s2: str) -> bool:
if len(s2) < len(s1):
return False
table = collections.Counter(s1)
count = len(s1)
start = 0
for i in range(len(s2)):
table[s2[i]] -= 1
if table[s2[i]] >= 0:
count -= 1
if count == 0:
return True
if i - start + 1 == len(s1):
table[s2[start]] += 1
if table[s2[start]] > 0:
count += 1
start += 1
return False
| mit | -1,377,102,481,941,461,800 | 28.638298 | 59 | 0.38191 | false | 4.183183 | false | false | false |
rspavel/spack | lib/spack/spack/cmd/debug.py | 3 | 2974 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import platform
import re
from datetime import datetime
from glob import glob
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir
import spack.architecture as architecture
import spack.paths
from spack.main import get_version
from spack.util.executable import which
description = "debugging commands for troubleshooting Spack"
section = "developer"
level = "long"
def setup_parser(subparser):
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='debug_command')
sp.add_parser('create-db-tarball',
help="create a tarball of Spack's installation metadata")
sp.add_parser('report', help='print information useful for bug reports')
def _debug_tarball_suffix():
now = datetime.now()
suffix = now.strftime('%Y-%m-%d-%H%M%S')
git = which('git')
if not git:
return 'nobranch-nogit-%s' % suffix
with working_dir(spack.paths.prefix):
if not os.path.isdir('.git'):
return 'nobranch.nogit.%s' % suffix
# Get symbolic branch name and strip any special chars (mainly '/')
symbolic = git(
'rev-parse', '--abbrev-ref', '--short', 'HEAD', output=str).strip()
symbolic = re.sub(r'[^\w.-]', '-', symbolic)
# Get the commit hash too.
commit = git(
'rev-parse', '--short', 'HEAD', output=str).strip()
if symbolic == commit:
return "nobranch.%s.%s" % (commit, suffix)
else:
return "%s.%s.%s" % (symbolic, commit, suffix)
def create_db_tarball(args):
tar = which('tar')
tarball_name = "spack-db.%s.tar.gz" % _debug_tarball_suffix()
tarball_path = os.path.abspath(tarball_name)
base = os.path.basename(str(spack.store.root))
transform_args = []
if 'GNU' in tar('--version', output=str):
transform_args = ['--transform', 's/^%s/%s/' % (base, tarball_name)]
else:
transform_args = ['-s', '/^%s/%s/' % (base, tarball_name)]
wd = os.path.dirname(str(spack.store.root))
with working_dir(wd):
files = [spack.store.db._index_path]
files += glob('%s/*/*/*/.spack/spec.yaml' % base)
files = [os.path.relpath(f) for f in files]
args = ['-czf', tarball_path]
args += transform_args
args += files
tar(*args)
tty.msg('Created %s' % tarball_name)
def report(args):
print('* **Spack:**', get_version())
print('* **Python:**', platform.python_version())
print('* **Platform:**', architecture.Arch(
architecture.platform(), 'frontend', 'frontend'))
def debug(parser, args):
action = {
'create-db-tarball': create_db_tarball,
'report': report,
}
action[args.debug_command](args)
| lgpl-2.1 | 4,554,121,192,927,436,000 | 29.040404 | 79 | 0.619704 | false | 3.540476 | false | false | false |
cupen/django-xadmin | xadmin/compatibility.py | 1 | 1402 |
# coding:utf-8
__author__ = '[email protected]'
import sys
from django.utils import six
from django.utils.encoding import smart_str, force_str
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
smart_unicode = smart_str
force_unicode = force_str
if PY2:
from django.utils.encoding import smart_unicode, force_unicode
def filte_dict(_dict, callback):
"""
>>> from django.conf import settings
>>> settings.configure()
>>> d = {"a":1, "b":2, "c":3}
>>> filte_dict(d, lambda k,v: k != "b")
:param _dict:
:param callback:
:return:
"""
if not isinstance(_dict, dict):
raise TypeError("Invalid dict:%s"%(_dict))
rs = {}
for k, v in _dict.items():
if callback(k, v):
rs[k] = v
return rs
_builtin_filter = filter
def oh_my_filter(callback, iterable):
    if PY2: return _builtin_filter(callback, iterable)
    return type(iterable)(_builtin_filter(callback, iterable))
filter = oh_my_filter
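# Note (illustrative): the wrapper keeps Python 2's behaviour of returning a
# concrete sequence of the same type, e.g.
#   oh_my_filter(lambda x: x > 1, [1, 2, 3])  ->  [2, 3]  (a list, not an iterator)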
# used as py3 urllib.
# @see https://pythonhosted.org/six/#module-six.moves
urllib = six.moves.urllib
def urlopen(*args, **kwargs):
return urllib.request.urlopen(*args, **kwargs)
def http_get(url, encoding="utf-8"):
"""
>>> None != http_get("https://www.google.com")
True
"""
    return urlopen(url).read().decode(encoding)
| bsd-3-clause | -5,009,678,236,210,596,000 | 23.071429 | 66 | 0.600571 | false | 3.215596 | false | false | false
timwaizenegger/raspberrypi-examples | sensor-thumb-joystick/ky023.py | 2 | 1703 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
reads position from thumb-joystick
needs spidev installed
http://www.raspberrypi-spy.co.uk/2014/08/enabling-the-spi-interface-on-the-raspberry-pi/
"""
import RPi.GPIO as GPIO
import time
import spidev
xPin = 0 # joystick x connected to A0
yPin = 1 # joystick y connected to A1
swPin = 27 # sw connected to D27
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(swPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
tolerancevalue = 10
xZero = 512
yZero = 512
spi = spidev.SpiDev()
spi.open(0,0)
def readadc(adcnum):
# read SPI data from MCP3004 chip, 4 possible adc’s (0 thru 3)
if ((adcnum > 3) or (adcnum < 0)):
return -1
r = spi.xfer2([1,8 + adcnum << 4, 0])
print(r)
adcout = ((r[1] &3) << 8) + r[2]
return adcout
def position(adcnum, zerovalue):
return readadc(adcnum) - zerovalue
def eventSWButton(e):
print("SW from joystick pressed")
print(e)
# using the callback is optional
GPIO.add_event_detect(swPin, GPIO.FALLING, bouncetime = 200, callback = eventSWButton)
while True:
xPos = position(xPin, xZero)
yPos = position(yPin, yZero)
# in case you don't want to use the callback
#if (GPIO.input(swPin) == 0):
# print("Button pressed!")
if (abs(xPos) < tolerancevalue):
print("Not moving in X.")
elif (xPos > 0):
print("Moving ahead.")
print("X intensity: %5d" % abs(xPos))
else:
print("Moving backwards.")
print("X intensity: %5d" % abs(xPos))
if (abs(yPos) < tolerancevalue):
print("Not moving in Y.")
elif (yPos > 0):
print("Moving left.")
print("Y intensity: %5d" % abs(yPos))
else:
print("Moving right.")
print("Y intensity: %5d" % abs(yPos))
print("")
time.sleep(0.5)
print('done.')
| mit | -5,053,283,674,795,448,000 | 21.394737 | 88 | 0.670782 | false | 2.569486 | false | false | false
google/shaka-streamer | streamer/pipeline_configuration.py | 1 | 10668 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import enum
import os
import platform
import shlex
from . import bitrate_configuration
from . import configuration
from typing import List
# A randomly-chosen content ID in hex.
RANDOM_CONTENT_ID = base64.b16encode(os.urandom(16)).decode('UTF-8')
# The Widevine UAT server URL.
UAT_SERVER = 'https://license.uat.widevine.com/cenc/getcontentkey/widevine_test'
# Credentials for the Widevine test account.
WIDEVINE_TEST_ACCOUNT = 'widevine_test'
WIDEVINE_TEST_SIGNING_KEY = '1ae8ccd0e7985cc0b6203a55855a1034afc252980e970ca90e5202689f947ab9'
WIDEVINE_TEST_SIGNING_IV = 'd58ce954203b7c9a9a9d467f59839249'
# The default hardware acceleration API to use, per platform.
if platform.system() == 'Linux':
DEFAULT_HWACCEL_API = 'vaapi'
elif platform.system() == 'Darwin': # AKA macOS
DEFAULT_HWACCEL_API = 'videotoolbox'
else:
DEFAULT_HWACCEL_API = ''
class StreamingMode(enum.Enum):
LIVE = 'live'
"""Indicates a live stream, which has no end."""
VOD = 'vod'
"""Indicates a video-on-demand (VOD) stream, which is finite."""
class ManifestFormat(enum.Enum):
DASH = 'dash'
HLS = 'hls'
class ProtectionScheme(enum.Enum):
CENC = 'cenc'
"""AES-128-CTR mode."""
CBCS = 'cbcs'
"""AES-128-CBC mode with pattern encryption."""
class ProtectionSystem(enum.Enum):
WIDEVINE = 'Widevine'
FAIRPLAY = 'FairPlay'
PLAYREADY = 'PlayReady'
MARLIN = 'Marlin'
COMMON = 'CommonSystem'
class EncryptionMode(enum.Enum):
WIDEVINE = 'widevine'
"""Widevine key server mode"""
RAW = 'raw'
"""Raw key mode"""
class RawKeyConfig(configuration.Base):
"""An object representing a list of keys for Raw key encryption"""
label = configuration.Field(str).cast()
"""An arbitary string or a predefined DRM label like AUDIO, SD, HD, etc.
If not specified, indicates the default key and key_id."""
key_id = configuration.Field(configuration.HexString, required=True).cast()
"""A key identifier as a 32-digit hex string"""
key = configuration.Field(configuration.HexString, required=True).cast()
"""The encryption key to use as a 32-digit hex string"""
class EncryptionConfig(configuration.Base):
"""An object representing the encryption config for Shaka Streamer."""
enable = configuration.Field(bool, default=False).cast()
"""If true, encryption is enabled.
Otherwise, all other encryption settings are ignored.
"""
encryption_mode = configuration.Field(
EncryptionMode, default=EncryptionMode.WIDEVINE).cast()
"""Encryption mode to use. By default it is widevine but can be changed
to raw."""
protection_systems = configuration.Field(List[ProtectionSystem]).cast()
"""Protection Systems to be generated. Supported protection systems include
  Widevine, PlayReady, FairPlay, Marlin and CommonSystem.
"""
pssh = configuration.Field(configuration.HexString).cast()
"""One or more concatenated PSSH boxes in hex string format. If this and
`protection_systems` is not specified, a v1 common PSSH box will be
generated.
Applies to 'raw' encryption_mode only.
"""
iv = configuration.Field(configuration.HexString).cast()
"""IV in hex string format. If not specified, a random IV will be
generated.
Applies to 'raw' encryption_mode only.
"""
keys = configuration.Field(List[RawKeyConfig]).cast()
"""A list of encryption keys to use.
Applies to 'raw' encryption_mode only."""
content_id = configuration.Field(
configuration.HexString, default=RANDOM_CONTENT_ID).cast()
"""The content ID, in hex.
If omitted, a random content ID will be chosen for you.
Applies to 'widevine' encryption_mode only.
"""
key_server_url = configuration.Field(str, default=UAT_SERVER).cast()
"""The URL of your key server.
This is used to generate an encryption key. By default, it is Widevine's UAT
server.
Applies to 'widevine' encryption_mode only.
"""
signer = configuration.Field(str, default=WIDEVINE_TEST_ACCOUNT).cast()
"""The name of the signer when authenticating to the key server.
Applies to 'widevine' encryption_mode only.
Defaults to the Widevine test account.
"""
signing_key = configuration.Field(
configuration.HexString, default=WIDEVINE_TEST_SIGNING_KEY).cast()
"""The signing key, in hex, when authenticating to the key server.
Applies to 'widevine' encryption_mode only.
Defaults to the Widevine test account's key.
"""
signing_iv = configuration.Field(
configuration.HexString, default=WIDEVINE_TEST_SIGNING_IV).cast()
"""The signing IV, in hex, when authenticating to the key server.
Applies to 'widevine' encryption_mode only.
Defaults to the Widevine test account's IV.
"""
protection_scheme = configuration.Field(ProtectionScheme,
default=ProtectionScheme.CENC).cast()
"""The protection scheme (cenc or cbcs) to use when encrypting."""
clear_lead = configuration.Field(int, default=10).cast()
"""The seconds of unencrypted media at the beginning of the stream."""
def __init__(self, *args) -> None:
super().__init__(*args)
# Don't do any further checks if encryption is disabled
if not self.enable:
return
if self.encryption_mode == EncryptionMode.WIDEVINE:
field_names = ['keys', 'pssh', 'iv']
for field_name in field_names:
if getattr(self, field_name):
field = getattr(self.__class__, field_name)
reason = 'cannot be set when encryption_mode is "%s"' % \
self.encryption_mode
raise configuration.MalformedField(
self.__class__, field_name, field, reason)
elif self.encryption_mode == EncryptionMode.RAW:
# Check at least one key has been specified
if not self.keys:
field = self.__class__.keys
reason = 'at least one key must be specified'
raise configuration.MalformedField(
self.__class__, 'keys', field, reason)
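# Illustrative YAML fragment (assumed layout, not taken from this file): the
# field names above are the keys used under "encryption" in a pipeline config,
# e.g. for raw-key mode:
#
#     encryption:
#       enable: true
#       encryption_mode: raw
#       keys:
#         - key_id: 00000000000000000000000000000000
#           key: 11111111111111111111111111111111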
class PipelineConfig(configuration.Base):
"""An object representing the entire pipeline config for Shaka Streamer."""
streaming_mode = configuration.Field(StreamingMode, required=True).cast()
"""The streaming mode, which can be either 'vod' or 'live'."""
quiet = configuration.Field(bool, default=False).cast()
"""If true, reduce the level of output.
Only errors will be shown in quiet mode.
"""
debug_logs = configuration.Field(bool, default=False).cast()
"""If true, output simple log files from each node.
No control is given over log filenames. Logs are written to the current
working directory. We do not yet support log rotation. This is meant only
for debugging.
"""
hwaccel_api = configuration.Field(str, default=DEFAULT_HWACCEL_API).cast()
"""The FFmpeg hardware acceleration API to use with hardware codecs.
A per-platform default will be chosen if this field is omitted.
See documentation here: https://trac.ffmpeg.org/wiki/HWAccelIntro
"""
resolutions = configuration.Field(
List[bitrate_configuration.VideoResolutionName],
required=True).cast()
"""A list of resolution names to encode.
Any resolution greater than the input resolution will be ignored, to avoid
upscaling the content. This also allows you to reuse a pipeline config for
multiple inputs.
"""
# TODO(joeyparrish): Default to whatever is in the input.
channels = configuration.Field(int, default=2).cast()
"""The number of audio channels to encode."""
audio_codecs = configuration.Field(
List[bitrate_configuration.AudioCodec],
default=[bitrate_configuration.AudioCodec.AAC]).cast()
"""The audio codecs to encode with."""
video_codecs = configuration.Field(
List[bitrate_configuration.VideoCodec],
default=[bitrate_configuration.VideoCodec.H264]).cast()
"""The video codecs to encode with.
Note that the prefix "hw:" indicates that a hardware encoder should be
used.
"""
manifest_format = configuration.Field(List[ManifestFormat],
default=[
ManifestFormat.DASH,
ManifestFormat.HLS,
]).cast()
"""A list of manifest formats (dash or hls) to create.
By default, this will create both.
"""
dash_output = configuration.Field(str, default='dash.mpd').cast()
"""Output filename for the DASH manifest, if created."""
hls_output = configuration.Field(str, default='hls.m3u8').cast()
"""Output filename for the HLS master playlist, if created."""
segment_folder = configuration.Field(str, default='').cast()
"""Sub-folder for segment output (or blank for none)."""
segment_size = configuration.Field(float, default=4).cast()
"""The length of each segment in seconds."""
segment_per_file = configuration.Field(bool, default=True).cast()
"""If true, force each segment to be in a separate file.
Must be true for live content.
"""
availability_window = configuration.Field(int, default=300).cast()
"""The number of seconds a segment remains available."""
presentation_delay = configuration.Field(int, default=30).cast()
"""How far back from the live edge the player should be, in seconds."""
update_period = configuration.Field(int, default=8).cast()
"""How often the player should fetch a new manifest, in seconds."""
encryption = configuration.Field(EncryptionConfig,
default=EncryptionConfig({})).cast()
"""Encryption settings."""
def __init__(self, *args) -> None:
super().__init__(*args)
if self.streaming_mode == StreamingMode.LIVE and not self.segment_per_file:
field = self.__class__.segment_per_file
reason = 'must be true when streaming_mode is "live"'
raise configuration.MalformedField(
self.__class__, 'segment_per_file', field, reason)
def get_resolutions(self) -> List[bitrate_configuration.VideoResolution]:
VideoResolution = bitrate_configuration.VideoResolution # alias
return [VideoResolution.get_value(name) for name in self.resolutions]
| apache-2.0 | -4,969,859,681,184,087,000 | 32.759494 | 94 | 0.695819 | false | 3.873638 | true | false | false |
KmolYuan/Pyslvs-PyQt5 | pyslvs_ui/graphics/canvas.py | 1 | 24144 | # -*- coding: utf-8 -*-
"""All color options in Pyslvs."""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2019"
__license__ = "AGPL"
__email__ = "[email protected]"
from typing import (
Tuple,
List,
Sequence,
Set,
Dict,
Iterator,
Any,
Union,
Optional,
ClassVar,
)
from abc import abstractmethod
from dataclasses import dataclass
from enum import auto, unique, IntEnum
from math import radians, sin, cos, atan2, hypot, isnan
from functools import reduce
from qtpy.QtCore import Slot, Qt, QPointF, QRectF, QSizeF
from qtpy.QtWidgets import QWidget, QSizePolicy
from qtpy.QtGui import (
QPolygonF,
QPainter,
QBrush,
QPen,
QColor,
QFont,
QPainterPath,
QImage,
QPaintEvent,
)
from pyslvs import VPoint, Graph, edges_view, parse_pos
from pyslvs_ui.qt_patch import QABCMeta
from .color import color_num, color_qt, target_path_style
LINK_COLOR = QColor(226, 219, 190)
_Coord = Tuple[float, float]
def convex_hull(
points: List[_Coord],
*,
as_qpoint: bool = False
) -> Union[List[_Coord], List[QPointF]]:
"""Returns points on convex hull in counterclockwise order
according to Graham's scan algorithm.
"""
def cmp(a: float, b: float) -> int:
return (a > b) - (a < b)
def turn(p: _Coord, q: _Coord, r: _Coord) -> int:
px, py = p
qx, qy = q
rx, ry = r
return cmp((qx - px) * (ry - py) - (rx - px) * (qy - py), 0)
def keep_left(hull: List[_Coord], r: _Coord) -> List[_Coord]:
while len(hull) > 1 and turn(hull[-2], hull[-1], r) != 1:
hull.pop()
if not hull or hull[-1] != r:
hull.append(r)
return hull
points.sort()
lower: List[Tuple[float, float]] = reduce(keep_left, points, [])
upper: List[Tuple[float, float]] = reduce(keep_left, reversed(points), [])
lower.extend(upper[i] for i in range(1, len(upper) - 1))
result = []
for x, y in lower:
if as_qpoint:
result.append(QPointF(x, y))
else:
result.append((x, y))
return result
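# Hypothetical usage (not part of the original module): the interior point of a
# unit square is dropped and the corners come back in counterclockwise order.
#
#     convex_hull([(0., 0.), (1., 0.), (1., 1.), (0., 1.), (.5, .5)])
#     -> [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]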
@dataclass(repr=False, eq=False)
class _PathOption:
"""Path option class.
Attributes:
+ Path data (-1: Hide, 0: Preview path data)
+ Show mode parameter.
+ The path will be the curve, otherwise using the points.
"""
path: Sequence[Sequence[_Coord]] = ()
show: int = -1
curve: bool = True
@unique
class _TickMark(IntEnum):
"""The status of tick mark."""
hide = auto()
show = auto()
show_num = auto()
class BaseCanvas(QWidget, metaclass=QABCMeta):
"""The subclass can draw a blank canvas more easier."""
@abstractmethod
def __init__(self, parent: QWidget) -> None:
"""Set the parameters for drawing."""
super(BaseCanvas, self).__init__(parent)
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self.setFocusPolicy(Qt.StrongFocus)
self.painter = QPainter()
# Origin coordinate
self.ox = self.width() / 2
self.oy = self.height() / 2
# Canvas zoom rate
self.zoom = 1.
# Joint size
self.joint_size = 5
# Canvas line width
self.link_width = 3
self.path_width = 3
# Font size
self.font_size = 15
# Show point mark or dimension
self.show_ticks = _TickMark.show
self.show_point_mark = True
self.show_dimension = True
# Path track
self.path = _PathOption()
# Path solving
self.ranges: Dict[str, QRectF] = {}
self.target_path: Dict[str, Sequence[_Coord]] = {}
self.show_target_path = False
# Background
self.background = QImage()
self.background_opacity = 1.
self.background_scale = 1.
self.background_offset = QPointF(0, 0)
# Monochrome mode
self.monochrome = False
# Grab mode
self.__grab_mode = False
def switch_grab(self) -> None:
"""Start grab mode."""
self.__grab_mode = not self.__grab_mode
@staticmethod
def zoom_factor(
width: int,
height: int,
x_right: float,
x_left: float,
y_top: float,
y_bottom: float
) -> float:
"""Calculate the zoom factor."""
x_diff = x_left - x_right
y_diff = y_top - y_bottom
x_diff = x_diff if x_diff else 1.
y_diff = y_diff if y_diff else 1.
if width / x_diff < height / y_diff:
return width / x_diff
else:
return height / y_diff
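    # Illustrative numbers (not from the original source): a 100x50 widget
    # showing x in [-20, 20] and y in [-10, 10] gives
    # min(100 / 40, 50 / 20) = 2.5 pixels per drawing unit.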
@abstractmethod
def paintEvent(self, event: QPaintEvent) -> None:
"""Using a QPainter under 'self',
so just change QPen or QBrush before painting.
"""
if not self.__grab_mode:
self.painter.begin(self)
self.painter.fillRect(event.rect(), QBrush(Qt.white))
# Translation
self.painter.translate(self.ox, self.oy)
# Background
if not self.background.isNull():
rect = self.background.rect()
self.painter.setOpacity(self.background_opacity)
self.painter.drawImage(QRectF(
self.background_offset * self.zoom,
QSizeF(rect.width(), rect.height())
* self.background_scale * self.zoom
), self.background, QRectF(rect))
self.painter.setOpacity(1)
# Show frame
pen = QPen(Qt.blue)
pen.setWidth(1)
self.painter.setPen(pen)
self.painter.setFont(QFont("Arial", self.font_size))
# Draw origin lines
if self.show_ticks not in {_TickMark.show, _TickMark.show_num}:
return
pen.setColor(Qt.gray)
self.painter.setPen(pen)
x_l = -self.ox
x_r = self.width() - self.ox
self.painter.drawLine(QPointF(x_l, 0), QPointF(x_r, 0))
y_t = self.height() - self.oy
y_b = -self.oy
self.painter.drawLine(QPointF(0, y_b), QPointF(0, y_t))
def indexing(v: float) -> int:
"""Draw tick."""
return int(v / self.zoom - v / self.zoom % 5)
# Draw tick
for x in range(indexing(x_l), indexing(x_r) + 1, 5):
if x == 0:
continue
is_ten = x % 10 == 0
end = QPointF(x * self.zoom, -10 if is_ten else -5)
self.painter.drawLine(QPointF(x, 0) * self.zoom, end)
if self.show_ticks == _TickMark.show_num and is_ten:
self.painter.drawText(end + QPointF(0, 3), f"{x}")
for y in range(indexing(y_b), indexing(y_t) + 1, 5):
if y == 0:
continue
is_ten = y % 10 == 0
end = QPointF(10 if is_ten else 5, y * self.zoom)
self.painter.drawLine(QPointF(0, y) * self.zoom, end)
if self.show_ticks == _TickMark.show_num and is_ten:
self.painter.drawText(end + QPointF(3, 0), f"{-y}")
        # Please call the "end" method when the paint event ends.
def draw_circle(self, p: QPointF, r: float) -> None:
"""Draw circle."""
self.painter.drawEllipse(p, r, r)
def draw_point(
self,
i: int,
cx: float,
cy: float,
fixed: bool,
color: Optional[Tuple[int, int, int]],
mul: int = 1
) -> None:
"""Draw a joint."""
if self.monochrome or color is None:
color = Qt.black
else:
color = QColor(*color)
pen = QPen(color)
pen.setWidth(2)
self.painter.setPen(pen)
x = cx * self.zoom
y = cy * -self.zoom
if fixed:
# Draw a triangle below
self.painter.drawPolygon(
QPointF(x, y),
QPointF(x - self.joint_size, y + 2 * self.joint_size),
QPointF(x + self.joint_size, y + 2 * self.joint_size)
)
r = self.joint_size
for _ in range(1 if mul < 1 else mul):
self.draw_circle(QPointF(x, y), r)
r += 5
if not self.show_point_mark:
return
pen.setColor(Qt.darkGray)
pen.setWidth(2)
self.painter.setPen(pen)
text = f"[Point{i}]"
if self.show_dimension:
text += f":({cx:.02f}, {cy:.02f})"
self.painter.drawText(QPointF(x, y) + QPointF(6, -6), text)
def draw_slvs_ranges(self) -> None:
"""Draw solving range."""
pen = QPen()
pen.setWidth(5)
for i, (tag, rect) in enumerate(self.ranges.items()):
range_color = QColor(color_num(i + 1))
range_color.setAlpha(30)
self.painter.setBrush(range_color)
range_color.setAlpha(255)
pen.setColor(range_color)
self.painter.setPen(pen)
cx = rect.x() * self.zoom
cy = rect.y() * -self.zoom
if rect.width():
self.painter.drawRect(QRectF(
QPointF(cx, cy),
QSizeF(rect.width(), rect.height()) * self.zoom
))
else:
self.draw_circle(QPointF(cx, cy), 3)
range_color.setAlpha(255)
pen.setColor(range_color)
self.painter.setPen(pen)
self.painter.drawText(QPointF(cx, cy) + QPointF(6, -6), tag)
self.painter.setBrush(Qt.NoBrush)
def draw_target_path(self) -> None:
"""Draw solving path."""
pen = QPen()
pen.setWidth(self.path_width)
for i, name in enumerate(sorted(self.target_path)):
path = self.target_path[name]
road, dot, brush = target_path_style(i)
pen.setColor(road)
self.painter.setPen(pen)
self.painter.setBrush(brush)
if len(path) == 1:
x, y = path[0]
p = QPointF(x, -y) * self.zoom
self.painter.drawText(p + QPointF(6, -6), name)
pen.setColor(dot)
self.painter.setPen(pen)
self.draw_circle(p, self.joint_size)
else:
painter_path = QPainterPath()
for j, (x, y) in enumerate(path):
p = QPointF(x, -y) * self.zoom
self.draw_circle(p, self.joint_size)
if j == 0:
self.painter.drawText(p + QPointF(6, -6), name)
painter_path.moveTo(p)
else:
x2, y2 = path[j - 1]
self.__draw_arrow(x, -y, x2, -y2, zoom=True)
painter_path.lineTo(p)
pen.setColor(road)
self.painter.setPen(pen)
self.painter.drawPath(painter_path)
for x, y in path:
pen.setColor(dot)
self.painter.setPen(pen)
self.draw_circle(QPointF(x, -y) * self.zoom, self.joint_size)
self.painter.setBrush(Qt.NoBrush)
def __draw_arrow(
self,
x1: float,
y1: float,
x2: float,
y2: float,
*,
zoom: bool = False,
text: str = ''
) -> None:
"""Front point -> Back point"""
if zoom:
x1 *= self.zoom
y1 *= self.zoom
x2 *= self.zoom
y2 *= self.zoom
a = atan2(y2 - y1, x2 - x1)
x1 = (x1 + x2) / 2 - 7.5 * cos(a)
y1 = (y1 + y2) / 2 - 7.5 * sin(a)
first_point = QPointF(x1, y1)
self.painter.drawLine(first_point, QPointF(
x1 + 15 * cos(a + radians(20)),
y1 + 15 * sin(a + radians(20))
))
self.painter.drawLine(first_point, QPointF(
x1 + 15 * cos(a - radians(20)),
y1 + 15 * sin(a - radians(20))
))
if not text:
return
# Font
font = self.painter.font()
font_copy = QFont(font)
font.setBold(True)
font.setPointSize(font.pointSize() + 8)
self.painter.setFont(font)
# Color
pen = self.painter.pen()
color = pen.color()
pen.setColor(color.darker())
self.painter.setPen(pen)
self.painter.drawText(first_point, text)
pen.setColor(color)
self.painter.setPen(pen)
self.painter.setFont(font_copy)
def draw_curve(self, path: Sequence[_Coord]) -> None:
"""Draw path as curve."""
if len(set(path)) < 2:
return
painter_path = QPainterPath()
error = False
for i, (x, y) in enumerate(path):
if isnan(x):
error = True
self.painter.drawPath(painter_path)
painter_path = QPainterPath()
else:
p = QPointF(x, -y) * self.zoom
if i == 0:
painter_path.moveTo(p)
self.draw_circle(p, 2)
continue
if error:
painter_path.moveTo(p)
error = False
else:
painter_path.lineTo(p)
self.painter.drawPath(painter_path)
def draw_dot(self, path: Sequence[_Coord]) -> None:
"""Draw path as dots."""
if len(set(path)) < 2:
return
for i, (x, y) in enumerate(path):
if isnan(x):
continue
p = QPointF(x, -y) * self.zoom
if i == 0:
self.draw_circle(p, 2)
else:
self.painter.drawPoint(p)
def solution_polygon(
self,
func: str,
args: Sequence[str],
target: str,
pos: Sequence[VPoint]
) -> Tuple[List[QPointF], QColor]:
"""Get solution polygon."""
if func == 'PLLP':
color = QColor(121, 171, 252)
params = [args[0], args[-1]]
elif func == 'PLAP':
color = QColor(249, 84, 216)
params = [args[0]]
else:
if func == 'PLPP':
color = QColor(94, 255, 185)
else:
# PXY
color = QColor(249, 175, 27)
params = [args[0]]
params.append(target)
tmp_list = []
for name in params:
try:
index = int(name.replace('P', ''))
except ValueError:
continue
else:
vpoint = pos[index]
tmp_list.append(QPointF(vpoint.cx, -vpoint.cy) * self.zoom)
return tmp_list, color
def draw_solution(
self,
func: str,
args: Sequence[str],
target: str,
pos: Sequence[VPoint]
) -> None:
"""Draw the solution triangle."""
points, color = self.solution_polygon(func, args, target, pos)
color.setAlpha(150)
pen = QPen(color)
pen.setWidth(self.joint_size)
self.painter.setPen(pen)
def draw_arrow(index: int, text: str) -> None:
"""Draw arrow."""
self.__draw_arrow(
points[-1].x(),
points[-1].y(),
points[index].x(),
points[index].y(),
text=text
)
draw_arrow(0, args[1])
if func == 'PLLP':
draw_arrow(1, args[2])
color.setAlpha(30)
self.painter.setBrush(QBrush(color))
self.painter.drawPolygon(QPolygonF(points))
self.painter.setBrush(Qt.NoBrush)
@Slot(int)
def set_show_ticks(self, show: int):
"""Set the appearance of tick mark."""
self.show_ticks = _TickMark(show + 1)
self.update()
@Slot(bool)
def set_monochrome_mode(self, monochrome: bool) -> None:
"""Set monochrome mode."""
self.monochrome = monochrome
self.update()
class PreviewCanvas(BaseCanvas):
"""A preview canvas use to show structure diagram."""
view_size: ClassVar[int] = 240
def __init__(self, parent: QWidget) -> None:
"""Input parameters and attributes.
+ Origin graph
+ Customize points: Dict[str, int]
+ Multiple joints: Dict[int, int]
+ Positions: Dict[int, Tuple[float, float]]
+ Joint status: Dict[int, bool]
+ Name dict: Dict['P0', 'A']
"""
super(PreviewCanvas, self).__init__(parent)
self.graph = Graph([])
self.cus: Dict[int, int] = {}
self.same: Dict[int, int] = {}
self.pos: Dict[int, _Coord] = {}
self.status: Dict[int, bool] = {}
# Additional attributes.
self.grounded = -1
self.driver: Set[int] = set()
self.target: Set[int] = set()
self.clear()
def clear(self) -> None:
"""Clear the attributes."""
self.graph = Graph([])
self.cus.clear()
self.same.clear()
self.pos.clear()
self.status.clear()
self.grounded = -1
self.driver.clear()
self.target.clear()
self.update()
def paintEvent(self, event: QPaintEvent) -> None:
"""Draw the structure."""
width = self.width()
height = self.height()
if self.pos:
x_right, x_left, y_top, y_bottom = self.__zoom_to_fit_limit()
self.zoom = self.zoom_factor(
width,
height,
x_right,
x_left,
y_top,
y_bottom
) * 0.75
self.ox = width / 2 - (x_left + x_right) / 2 * self.zoom
self.oy = height / 2 + (y_top + y_bottom) / 2 * self.zoom
else:
if width <= height:
self.zoom = width / PreviewCanvas.view_size
else:
self.zoom = height / PreviewCanvas.view_size
self.ox = width / 2
self.oy = height / 2
super(PreviewCanvas, self).paintEvent(event)
pen = QPen()
pen.setWidth(self.joint_size)
self.painter.setPen(pen)
if self.monochrome:
color = QColor(Qt.darkGray)
else:
color = LINK_COLOR
color.setAlpha(150)
self.painter.setBrush(QBrush(color))
# Links
for link in self.graph.vertices:
if link == self.grounded:
continue
points = []
# Points that is belong with the link.
for num, edge in edges_view(self.graph):
if link in edge:
if num in self.same:
num = self.same[num]
x, y = self.pos[num]
points.append((x * self.zoom, y * -self.zoom))
# Customize points.
for name, link_ in self.cus.items():
if link == link_:
x, y = self.pos[name]
points.append((x * self.zoom, y * -self.zoom))
self.painter.drawPolygon(*convex_hull(points, as_qpoint=True))
# Nodes
for node, (x, y) in self.pos.items():
if node in self.same:
continue
x *= self.zoom
y *= -self.zoom
if self.monochrome:
color = Qt.black
elif node in self.driver:
color = color_qt('Red')
elif node in self.target:
color = color_qt('Orange')
elif self.get_status(node):
color = color_qt('Green')
else:
color = color_qt('Blue')
pen.setColor(color)
self.painter.setPen(pen)
self.painter.setBrush(QBrush(color))
self.draw_circle(QPointF(x, y), self.joint_size)
pen.setColor(Qt.black)
self.painter.setPen(pen)
# Text of node.
pen.setColor(Qt.black)
self.painter.setPen(pen)
for node, (x, y) in self.pos.items():
if node in self.same:
continue
x *= self.zoom
x += 2 * self.joint_size
y *= -self.zoom
y -= 2 * self.joint_size
self.painter.drawText(QPointF(x, y), f'P{node}')
self.painter.end()
def __zoom_to_fit_limit(self) -> Tuple[float, float, float, float]:
"""Limitations of four side."""
inf = float('inf')
x_right = inf
x_left = -inf
y_top = -inf
y_bottom = inf
for x, y in self.pos.values():
if x < x_right:
x_right = x
if x > x_left:
x_left = x
if y < y_bottom:
y_bottom = y
if y > y_top:
y_top = y
return x_right, x_left, y_top, y_bottom
def set_graph(self, graph: Graph, pos: Dict[int, _Coord]) -> None:
"""Set the graph from NetworkX graph type."""
self.graph = graph
self.pos = pos
self.status = {k: False for k in pos}
self.update()
def set_grounded(self, link: int) -> None:
"""Set the grounded link number."""
self.grounded = link
for n, edge in edges_view(self.graph):
self.status[n] = self.grounded in edge
for n, link in self.cus.items():
self.status[n] = self.grounded == link
self.update()
def set_driver(self, input_list: List[Tuple[int, int]]) -> None:
"""Set driver nodes."""
self.driver.clear()
self.driver.update(pair[0] for pair in input_list)
self.update()
def set_target(self, points: Sequence[int]) -> None:
"""Set target nodes."""
self.target.clear()
self.target.update(points)
self.update()
def set_status(self, point: str, status: bool) -> None:
"""Set status node."""
self.status[int(point.replace('P', ''))] = status
self.update()
def get_status(self, point: int) -> bool:
"""Get status. If multiple joints, return true."""
return self.status[point] or (point in self.same)
@staticmethod
def grounded_detect(
placement: Set[int],
g: Graph,
same: Dict[int, int]
) -> Iterator[int]:
"""Find the grounded link."""
links: List[Set[int]] = [set() for _ in range(len(g.vertices))]
for joint, link in edges_view(g):
for node in link:
links[node].add(joint)
for row, link in enumerate(links):
if placement == link - set(same):
# Return once
yield row
return
def from_profile(self, params: Dict[str, Any]) -> None:
"""Simple load by dict object."""
# Customize points and multiple joints
g = Graph(params['graph'])
expression: str = params['expression']
pos_list = parse_pos(expression)
cus: Dict[int, int] = params['cus']
same: Dict[int, int] = params['same']
self.cus = cus
self.same = same
for node, ref in sorted(self.same.items()):
pos_list.insert(node, pos_list[ref])
self.set_graph(g, {i: (x, y) for i, (x, y) in enumerate(pos_list)})
# Grounded setting
for row in self.grounded_detect(set(params['placement']), g, self.same):
self.set_grounded(row)
# Driver setting
input_list: List[Tuple[Tuple[int, int], Tuple[float, float]]] = params['input']
self.driver.clear()
self.driver.update(b for (b, _), _ in input_list)
# Target setting
target: Dict[int, Sequence[_Coord]] = params['target']
self.target.clear()
self.target.update(target)
self.update()
def is_all_lock(self) -> bool:
"""Is all joint has solution."""
for node, status in self.status.items():
if not status and node not in self.same:
return False
return True
def distance(self, n1: int, n2: int) -> float:
"""Return the distance of two point."""
x1, y1 = self.pos[n1]
x2, y2 = self.pos[n2]
return hypot(x1 - x2, y1 - y2)
| agpl-3.0 | 549,380,833,281,812,030 | 30.936508 | 87 | 0.508159 | false | 3.613289 | false | false | false |
qitta/libhugin | hugin/harvest/provider/ofdb/ofdbcommon.py | 1 | 4205 | #!/usr/bin/env python
# encoding: utf-8
""" Common ofdb provider stuff for parsing and extracting values. """
# stdlib
import json
import re
from collections import deque
class OFDBCommon:
def __init__(self):
self._urls = [
# seems to be currently the only correct working mirror, other
            # mirrors won't find e.g. plot metadata, even if it exists
'http://ofdbgw.geeksphere.de/{path}/{query}',
#'http://ofdbgw.home-of-root.de/{path}/{query}'
# those mirrors seems to be broken
#'http://ofdbgw.scheeper.de/{path}/{query}'
#'http://ofdbgw.johann-scharl.de/{path}/{query}'
#'http://ofdbgw.org/{path}/{query}'
#'http://ofdbgw.h1915283.stratoserver.net/{path}/{query}'
#'http://ofdbgw.metawave.ch/{path}/{query}'
]
self.base_url = self._get_url_iter(self._urls)
def _get_url_iter(self, urls):
url_list = deque(urls)
while True:
yield url_list[0]
url_list.rotate(1)
def get_base_url(self):
return next(self.base_url)
def _try_sanitize(self, response):
""" Try to sanitize a broken response containing a valid json doc. """
try:
splited = response.splitlines()
response = ''
for item in splited:
if re.search('{\s*"ofdbgw"', item):
response = item
break
return json.loads(response).get('ofdbgw')
except (TypeError, ValueError, AttributeError):
return False
def _build_urllist_from_idlist(self, ids, path):
""" Build list with urls out of given ids. """
url_list = []
for ofdbid in ids:
url = self.get_base_url().format(path=path, query=ofdbid)
url_list.append([url])
return url_list
def validate_url_response(self, url_response):
""" Validate a url-response tuple and load json response. """
try:
url, response = url_response.pop()
return 'ok', (None, None), url, json.loads(response).get('ofdbgw')
except (ValueError, AttributeError, TypeError):
response = self._try_sanitize(response)
if response is False:
return 'critical', (None, True), url, response
else:
return 'ok', (None, None), url, response
def personids_to_urllist(self, ids):
""" Build person provider urllist from person ids. """
return self._build_urllist_from_idlist(ids, 'person_json')
def movieids_to_urllist(self, ids):
""" Build movie provider urllist from person ids. """
return self._build_urllist_from_idlist(ids, 'movie_json')
def check_response_status(self, response):
"""
Validates the http response object status.
        Possible error codes that may appear in the valid json http response::
            0 = no errors
            1 = unknown error
            2 = error or timeout while querying IMDB or OFDB
            3 = no or invalid ID given
            4 = no data found for the given ID or query
            5 = error during data processing
            9 = maintenance mode, OFDBGW currently unavailable
        Returns a state flag and a return value specific to its error code. For
        possible flag and return value tuples see the code block below.
:param response: A json http response object.
:returns: A tuple containing a status flag and a return value.
"""
status = response['status']['rcode']
return_code = {
'unknown_error': [1, 2, 5],
'no_data_found': [4, 9],
'critical_error': [3],
'valid_response': [0]
}
if status in return_code['critical_error']:
return 'critical', (None, True)
if status in return_code['unknown_error']:
return 'unknown', (None, False)
if status in return_code['no_data_found']:
return 'no_data', ([], True)
if status in return_code['valid_response']:
return 'valid', ()
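    # Hypothetical caller sketch (not part of the original provider code):
    #
    #     flag, retv = common.check_response_status(response)
    #     if flag != 'valid':
    #         return retv      # e.g. ([], True) when the OFDB has no data
    #     # ...otherwise keep parsing the payload...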
| gpl-3.0 | -8,164,336,351,595,135,000 | 34.327731 | 79 | 0.570409 | false | 3.917987 | false | false | false |
pombredanne/MOG | nova/tests/virt/vmwareapi/test_vmwareapi_vif.py | 3 | 7424 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.network import model as network_model
from nova import test
from nova.tests import matchers
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif
class VMwareVifTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVifTestCase, self).setUp()
self.flags(vlan_interface='vmnet0', group='vmware')
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True)
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
self.session = "fake"
self.cluster = None
def tearDown(self):
super(VMwareVifTestCase, self).tearDown()
def test_ensure_vlan_bridge(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
self.cluster)
network_util.get_network_with_the_name('fake', 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)
# FlatDHCP network mode without vlan - network doesn't exist with the host
def test_ensure_vlan_bridge_without_vlan(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
self.cluster)
network_util.get_network_with_the_name('fake', 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
# FlatDHCP network mode without vlan - network exists with the host
# Get vswitch and check vlan interface should not be called
def test_ensure_vlan_bridge_with_network(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
vm_network = {'name': 'VM Network', 'type': 'Network'}
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(vm_network)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
# Flat network mode with DVS
def test_ensure_vlan_bridge_with_existing_dvs(self):
network_ref = {'dvpg': 'dvportgroup-2062',
'type': 'DistributedVirtualPortgroup'}
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(network_ref)
self.mox.ReplayAll()
ref = vif.ensure_vlan_bridge(self.session,
self.vif,
create_vlan=False)
self.assertThat(ref, matchers.DictMatches(network_ref))
def test_get_network_ref_neutron(self):
self.mox.StubOutWithMock(vif, 'get_neutron_network')
vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
self.mox.ReplayAll()
vif.get_network_ref(self.session, self.cluster, self.vif, True)
def test_get_network_ref_flat_dhcp(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=False)
self.mox.ReplayAll()
vif.get_network_ref(self.session, self.cluster, self.vif, False)
def test_get_network_ref_bridge(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=True)
self.mox.ReplayAll()
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True,
should_create_vlan=True)
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
vif.get_network_ref(self.session, self.cluster, self.vif, False)
| apache-2.0 | 2,194,094,830,972,222,000 | 45.4 | 78 | 0.582705 | false | 4.067945 | true | false | false |
hiidef/hiitrack-api | hiitrack/controllers/funnel.py | 1 | 9192 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Funnels are a collection of events within a bucket.
"""
from itertools import chain
from twisted.internet.defer import inlineCallbacks, returnValue, DeferredList
from telephus.cassandra.c08.ttypes import NotFoundException
from ..models import bucket_check, user_authorize
from ..models import FunnelModel, EventModel, PropertyModel
from ..lib.authentication import authenticate
from ..exceptions import MissingParameterException
from ..lib.b64encode import uri_b64decode, uri_b64encode, \
b64encode_nested_keys, b64encode_double_nested_keys, b64encode_keys
from ..lib.hash import pack_hash
from ..lib.parameters import require
from ..lib.profiler import profile
def _parse(data, event_ids):
"""Zip responses by data[offset::step]"""
data = [x[1] for x in data]
return (
dict(zip(event_ids, data[0::4])),
dict(zip(event_ids, data[1::4])),
dict(zip(event_ids, data[2::4])),
dict(zip(event_ids, data[3::4])))
def encode_nested_lists(dictionary):
"""
Base64 encodes nested lists.
"""
return dict([(uri_b64encode(k), [(uri_b64encode(x[0]), x[1]) for x in v]) \
for k, v in dictionary.items()])
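# Shape sketch (illustrative, not from the original source): given a mapping of
# binary hashes like {event_id: [(property_id, count), ...]}, the result keeps
# the counts but passes every id through uri_b64encode() so the structure can
# be returned in a JSON response.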
class Funnel(object):
"""
Funnel.
"""
def __init__(self, dispatcher):
dispatcher.connect(
name='funnel',
route='/{user_name}/{bucket_name}/funnel/{funnel_name}',
controller=self,
action='post',
conditions={"method": "POST"})
dispatcher.connect(
name='funnel',
route='/{user_name}/{bucket_name}/funnel',
controller=self,
action='preview_funnel',
conditions={"method": "GET"})
dispatcher.connect(
name='funnel',
route='/{user_name}/{bucket_name}/funnel/{funnel_name}',
controller=self,
action='get_saved_funnel',
conditions={"method": "GET"})
dispatcher.connect(
name='funnel',
route='/{user_name}/{bucket_name}/funnel/{funnel_name}',
controller=self,
action='delete',
conditions={"method": "DELETE"})
@authenticate
@user_authorize
@bucket_check
@require("event_id", "description")
@profile
@inlineCallbacks
def post(self, request, user_name, bucket_name, funnel_name):
"""
Create a new funnel.
"""
if len(request.args["event_id"]) < 2:
request.setResponseCode(403)
raise MissingParameterException("Parameter 'event_id' requires "
"at least two values.")
event_ids = [uri_b64decode(x) for x in request.args["event_id"]]
description = request.args["description"][0]
if "property" in request.args:
property_name = request.args["property"][0]
else:
property_name = None
funnel = FunnelModel(user_name, bucket_name, funnel_name)
yield funnel.create(description, event_ids, property_name)
request.setResponseCode(201)
@authenticate
@user_authorize
@bucket_check
@profile
@inlineCallbacks
def preview_funnel(self, request, user_name, bucket_name):
"""
Information about an unsaved funnel.
"""
if "event_id" in request.args:
if len(request.args["event_id"]) < 2:
request.setResponseCode(403)
raise MissingParameterException("Parameter 'event_id' requires"
" at least two values.")
event_ids = [uri_b64decode(x) for x in request.args["event_id"]]
elif "event" in request.args:
if len(request.args["event"]) < 2:
request.setResponseCode(403)
raise MissingParameterException("Parameter 'event' requires"
" at least two values.")
event_ids = [pack_hash((x,)) for x in request.args["event"]]
else:
request.setResponseCode(403)
raise MissingParameterException("Parameters 'event' or 'event_id'"
" required.")
if "property" in request.args:
_property = PropertyModel(
user_name,
bucket_name,
property_name=request.args["property"][0])
else:
_property = None
data = yield _get(user_name, bucket_name, event_ids, _property)
returnValue(data)
@authenticate
@user_authorize
@bucket_check
@profile
@inlineCallbacks
def get_saved_funnel(self, request, user_name, bucket_name, funnel_name):
"""
Information about a saved funnel.
"""
funnel = FunnelModel(user_name, bucket_name, funnel_name)
try:
yield funnel.get()
except NotFoundException:
request.setResponseCode(404)
raise
data = yield _get(user_name, bucket_name, funnel.event_ids, funnel.property)
data["description"] = funnel.description
returnValue(data)
@authenticate
@user_authorize
@bucket_check
@profile
@inlineCallbacks
def delete(self, request, user_name, bucket_name, funnel_name):
"""
Delete funnel.
"""
funnel = FunnelModel(user_name, bucket_name, funnel_name)
yield funnel.delete()
@inlineCallbacks
def _get(user_name, bucket_name, event_ids, _property):
"""
Information about a funnel.
"""
# Combine requests for event data.
deferreds = []
for event_id in event_ids:
event = EventModel(user_name, bucket_name, event_id=event_id)
deferreds.extend([
event.get_total(_property),
event.get_unique_total(_property),
event.get_path(_property),
event.get_unique_path(_property)])
if _property:
deferreds.append(_property.get_values())
data = yield DeferredList(deferreds)
response = {"event_ids": [uri_b64encode(x) for x in event_ids]}
if _property:
property_values = data.pop()[1]
response.update({
"property":{
"name":_property.property_name,
"id": uri_b64encode(_property.id),
"values": b64encode_keys(property_values)}})
_get_with_property(data, event_ids, response)
else:
_get_without_property(data, event_ids, response)
returnValue(response)
def _get_with_property(data, event_ids, response):
"""
Information about a funnel on a property.
"""
totals, unique_totals, paths, unique_paths = _parse(data, event_ids)
property_ids = set(chain(*[x.keys() for x in totals.values()]))
funnels = {}
unique_funnels = {}
for property_id in property_ids - set(event_ids):
event_id = event_ids[0]
_funnel = [(event_id, totals[event_id][property_id])]
unique_funnel = [(event_id, unique_totals[event_id][property_id])]
for i in range(1, len(event_ids)):
event_id = event_ids[i - 1]
new_event_id = event_ids[i]
if event_id not in paths[new_event_id][property_id]:
continue
_funnel.append((
new_event_id,
paths[new_event_id][property_id][event_id]))
unique_funnel.append((
new_event_id,
unique_paths[new_event_id][property_id][event_id]))
funnels[property_id] = _funnel
unique_funnels[property_id] = unique_funnel
response.update({
"totals": b64encode_nested_keys(totals),
"unique_totals": b64encode_nested_keys(unique_totals),
"paths": b64encode_double_nested_keys(paths),
"unique_paths": b64encode_double_nested_keys(unique_paths),
"funnels": encode_nested_lists(funnels),
"unique_funnels": encode_nested_lists(unique_funnels)})
def _get_without_property(data, event_ids, response):
"""
Information about a funnel without a property.
"""
totals, unique_totals, paths, unique_paths = _parse(data, event_ids)
# Full funnel, no properties.
event_id = event_ids[0]
totals = dict([(x, totals[x][x]) for x in totals])
unique_totals = dict([(x, unique_totals[x][x]) for x in unique_totals])
_funnel = [(event_id, totals[event_id])]
unique_funnel = [(event_id, unique_totals[event_id])]
paths = dict([(x, paths[x][x]) for x in paths])
unique_paths = dict([(x, unique_paths[x][x]) for x in unique_paths])
for i in range(1, len(event_ids)):
event_id = event_ids[i - 1]
new_event_id = event_ids[i]
_funnel.append((new_event_id, paths[new_event_id][event_id]))
unique_funnel.append((
new_event_id,
unique_paths[new_event_id][event_id]))
response.update({
"total": b64encode_keys(totals),
"unique_total": b64encode_keys(unique_totals),
"path": b64encode_nested_keys(paths),
"unique_path": b64encode_nested_keys(unique_paths),
"funnel": [(uri_b64encode(x[0]), x[1]) for x in _funnel],
"unique_funnel": [(uri_b64encode(x[0]), x[1]) \
for x in unique_funnel]})
| mit | 2,922,717,380,705,292,000 | 35.188976 | 84 | 0.591057 | false | 3.735067 | false | false | false |
ponl/knnFlow | converters/npy2xyz.py | 1 | 1525 | #!/usr/bin/python2
"""
npy2xyz
Author: Patrick A. O'Neil
License:
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import sys
if len(sys.argv) != 3:
print """
Converts from npy array to xyz format.
Usage:
./npy2xyz.py input/path.npy output/path.xyz
"""
# --- Read Inputs --- #
in_file = sys.argv[1] # Input File (Npy)
out_file = sys.argv[2] # Output File (xyz)
# --- Load Npy --- #
try:
D = np.load(in_file)
except IOError:
print """
Failed to load Npy file!
Exiting.
"""
N = len(D)
d = len(D[0])
# --- Write xyz --- #
ofile = open(out_file,'w')
for p_ in D:
line = ""
for p in p_:
line += str(p) + " "
# Compensate for no rotation coordinate
if d == 3:
        # Pad with zeros and end the line
        line += "0 0 0\n"
else:
# Remove extra space
line = line[:-1] + "\n"
ofile.write(line)
ofile.close()
| gpl-2.0 | -2,676,461,989,288,240,000 | 20.785714 | 73 | 0.620984 | false | 3.426966 | false | false | false |
SwankSwashbucklers/bottle-builder | bottle-builder/overrides.py | 1 | 2811 | """
Overrides of standard python classes and functions to provide customized
behavior and windows/mac compatibility.
"""
__all__ = [ "Template", "sCall", "sPopen" ]
##### Template #################################################################
from string import Template
from re import compile
class TemplateWrapper:
def __init__(self, cls):
PYTHON_LL = 80
HTML_LL = 80
self.cls = cls
self.headers = [
( # Primary python file header template
compile(r'\$ph{(.*?)}'),
lambda x: "\n\n{1}\n##### {0} {2}\n{1}\n".format(
x.upper(), '#'*PYTHON_LL, '#'*(PYTHON_LL-len(x)-7) )
),
( # Secondary python file header template
compile(r'\$sh{(.*?)}'),
lambda x: "\n### {0} {1}".format(
x, '#'*(PYTHON_LL-len(x)-5) )
),
( # HTML file header template
compile(r'\$wh{(.*?)}'),
lambda x: "<!-- ***** {0} {1} -->".format(
x, '*'*(HTML_LL-len(x)-16) )
)
]
def __call__(self, template):
for header in self.headers:
ptn, tpl = header
for match in ptn.finditer(template):
replacements = ( match.group(0), tpl(match.group(1)) )
template = template.replace(*replacements)
template_obj = self.cls(template)
template_obj.populate = self.populate
return template_obj
@staticmethod
def populate(template, filepath, **kwargs):
for key, value in kwargs.items():
if isinstance(value, list):
kwargs[key] = "\n".join(
[ t[0].safe_substitute(**t[1]) for t in value ]
)
try:
with open(filepath, 'w') as f:
f.write(template.safe_substitute(**kwargs))
except Exception as exception:
raise exception
Template = TemplateWrapper(Template)
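# Usage sketch (hypothetical template text, not from this project): wrapping
# string.Template means the header markers are expanded before substitution,
#
#     tpl = Template("$ph{Setup}\nname = '$app'")
#     tpl.populate(tpl, "out.py", app="demo")   # writes a '#' banner + "name = 'demo'"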
##### System Calls #############################################################
from subprocess import Popen, call, DEVNULL, STDOUT, PIPE
from sys import executable
import os
def sPopen(*args):
command, shell = list(args), True
if command[0] == 'python':
command[0] = executable
shell = False
if os.name == 'nt':
from subprocess import CREATE_NEW_CONSOLE
return Popen( command, shell=shell, creationflags=CREATE_NEW_CONSOLE )
else:
return Popen( command, shell=shell )
def sCall(*args):
command, shell = list(args), True
if command[0] == 'python':
command[0] = executable
shell = False
if os.name != 'nt':
shell = False
call( command, shell=shell, stdout=DEVNULL, stderr=STDOUT )
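# Usage sketch (assumed script names, not part of the original module):
#     sCall('python', 'setup.py', 'build')    # blocking, output suppressed
#     proc = sPopen('python', 'server.py')    # detached; new console on Windows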
| mit | -1,803,700,425,412,556,500 | 30.58427 | 80 | 0.499822 | false | 4.170623 | false | false | false |
IZSVenezie/VetEpiGIS-Group | plugin/dbconnection.py | 1 | 3204 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
VetEpiGIS-Group
A QGIS plugin
Spatial functions for vet epidemiology
-------------------
begin : 2016-05-06
git sha : $Format:%H$
copyright : (C) 2016 by Norbert Solymosi
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os, shutil
from PyQt5.QtGui import *
from PyQt5.QtCore import pyqtSignal, Qt, QSettings, QCoreApplication, QFile, QFileInfo, QDate, QVariant, \
pyqtSignal, QRegExp, QDateTime, QTranslator, QFile, QDir, QIODevice, QTextStream
from PyQt5.QtWidgets import *
from qgis.core import QgsDataSourceUri
from PyQt5.QtSql import *
import psycopg2
import psycopg2.extensions
# use unicode!
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
from .dbconnection_dialog import Ui_Dialog
class Dialog(QDialog, Ui_Dialog):
def __init__(self):
"""Constructor for the dialog.
"""
QDialog.__init__(self)
self.setupUi(self)
self.plugin_dir = ''
self.settings = ''
#self.comboBox_pg_db.currentIndexChanged.connect(self.seltype)
#self.commandLinkButton.clicked.connect(self.createNewSLdb)
self.toolButton_sl_db.clicked.connect(self.dbSource)
self.radioButton_spatialite.clicked.connect(self.seltype)
self.radioButton_postgis.clicked.connect(self.seltype)
#self.commandLinkButton_2.clicked.connect(self.createPGtables)
# self.lineEdit.setText('/home/sn/dev/QGISplugins/VetEpiGIS/groupdata/c.sqlite')
def dbSource(self):
dbpath = QFileDialog.getOpenFileName(self, 'Select file', QDir.currentPath(), 'SpatiaLite file (*.sqlite *.*)')
dbpath = dbpath[0]
if not dbpath or dbpath =='':
self.lineEdit_spatialite.setText('')
else:
self.lineEdit_spatialite.setText(dbpath)
def seltype(self):
if self.radioButton_spatialite.isChecked():
self.groupBox_postgis.setEnabled(False)
self.groupBox_spatialite.setEnabled(True)
self.radioButton_postgis.setChecked(False)
if self.radioButton_postgis.isChecked():
self.groupBox_spatialite.setEnabled(False)
self.groupBox_postgis.setEnabled(True)
self.radioButton_spatialite.setChecked(False)
| gpl-3.0 | -7,122,294,016,489,848,000 | 34.208791 | 119 | 0.556492 | false | 4.395062 | false | false | false |
tadejpetric/neuralsound | read.py | 1 | 2323 | '''
reads wav file
http://soundfile.sapp.org/doc/WaveFormat/
https://blogs.msdn.microsoft.com/dawate/2009/06/23/intro-to-audio-programming-part-2-demystifying-the-wav-format/
'''
import numpy as np #for 2d array
def read_data(name):
with open(name, "rb") as f:
sGroupID1 = f.read(4).decode('ascii') #always RIFF
dwFileLength = int.from_bytes(f.read(4), byteorder='little') #file size minus 8 in bytes
sRiffType = f.read(4).decode('ascii') #always "WAVE"
sGroupID2 = f.read(4).decode('ascii') #always "fmt "
dwHeaderChunkSize = int.from_bytes(f.read(4), byteorder='little') #size of next header section in bytes
subchunk = f.read(dwHeaderChunkSize) #reads the section above
wFormatTag = int.from_bytes(subchunk[:2], byteorder='little') #always 1 for PCM files (basically everything)
wChannels = int.from_bytes(subchunk[2:4], byteorder='little') #amount of channels (1 = mono, 2 stereo...)
dwSamplesPerSec = int.from_bytes(subchunk[4:8], byteorder='little') #samples taken per sec [Hz]
dwAvgBytesPerSec = int.from_bytes(subchunk[8:12], byteorder='little') #amount of bytes/ sec, useful for mem allocation
wBlockAlign = int.from_bytes(subchunk[12:14], byteorder='little') #amount of bytes for a sample*channels
dwBitsPerSample = int.from_bytes(subchunk[14:16], byteorder='little') #bits per sample (8 = 8bit song, 16 = 2byte...)
extraParams = subchunk[16:] #stores extra parameters
sGroupID3 = f.read(4).decode('ascii') #always "data"
dwChunkSize = int.from_bytes(f.read(4), byteorder='little') #size of data field in bytes
data = f.read(dwChunkSize) #reads data field, stores in bytes datatype
    bytedepth = dwBitsPerSample//8 #bytes per single sample value
    sound = np.zeros((wChannels, dwChunkSize//(wChannels*bytedepth))) #creates numpy array. y size = n channels; x size = n of samples per channel
    j = 0 #j cycles between channels
    for i in range(dwChunkSize//bytedepth):
        sound[j][(i//wChannels)] = int.from_bytes(data[i*bytedepth:(i+1)*bytedepth], byteorder='little', signed=(dwBitsPerSample > 8)) #slice sample i as whole bytes; 16-bit WAV samples are signed
#j selects channel; i/channels tells y position !integer division, decimals cut!
j += 1
j = j % wChannels #when j= n of channels, resets back to 0
#print(sound) #debug
return sound
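# Minimal usage sketch (assumed file name, not part of the original module):
#     samples = read_data("example.wav")  # shape: (channels, samples per channel)
#     print(samples.shape)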
| mit | -5,286,009,402,355,119,000 | 66.323529 | 126 | 0.65734 | false | 3.401171 | false | false | false |
dougwig/acos-client | acos_client/v30/slb/service_group.py | 1 | 3898 | # Copyright 2014, Jeff Buttars, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import acos_client.errors as acos_errors
import acos_client.v30.base as base
from member import Member
class ServiceGroup(base.BaseV30):
url_prefix = '/slb/service-group/'
@property
def member(self):
return Member(self.client)
# Valid LB methods
ROUND_ROBIN = 'round-robin'
WEIGHTED_ROUND_ROBIN = 'weighted-rr'
LEAST_CONNECTION = 'least-connection'
WEIGHTED_LEAST_CONNECTION = 'weighted-least-connection'
LEAST_CONNECTION_ON_SERVICE_PORT = 'service-least-connection'
WEIGHTED_LEAST_CONNECTION_ON_SERVICE_PORT = \
'service-weighted-least-connection'
FAST_RESPONSE_TIME = 'fastest-response'
LEAST_REQUEST = 'least-request'
STRICT_ROUND_ROBIN = 'round-robin-strict'
STATELESS_SOURCE_IP_HASH = 'stateless-src-ip-hash'
STATELESS_SOURCE_IP_HASH_ONLY = 'stateless-src-ip-only-hash'
STATELESS_DESTINATION_IP_HASH = 'stateless-dst-ip-hash'
STATELESS_SOURCE_DESTINATION_IP_HASH = 'stateless-src-dst-ip-hash'
STATELESS_PER_PACKET_ROUND_ROBIN = 'stateless-per-pkt-round-robin'
# Valid protocols
TCP = 'tcp'
UDP = 'udp'
def get(self, name, **kwargs):
return self._get(self.url_prefix + name, **kwargs)
def _set(self, name, protocol=None, lb_method=None, hm_name=None,
update=False, **kwargs):
# Normalize "" -> None for json
hm_name = hm_name or None
# v30 needs unit tests badly...
params = {
"service-group": self.minimal_dict({
"name": name,
"protocol": protocol,
})
}
# If we explicitly disable health checks, ensure it happens
# Else, we implicitly disable health checks if not specified.
health_check_disable = 1 if kwargs.get("health_check_disable", False) else 0
# When enabling/disabling a health monitor, you can't specify
# health-check-disable and health-check at the same time.
if hm_name is None:
params["service-group"]["health-check-disable"] = health_check_disable
else:
params["service-group"]["health-check"] = hm_name
if lb_method is None:
pass
elif lb_method[-16:] == 'least-connection':
params['service-group']['lc-method'] = lb_method
elif lb_method[:9] == 'stateless':
params['service-group']['stateless-lb-method'] = lb_method
else:
params['service-group']['lb-method'] = lb_method
if not update:
name = ''
self._post(self.url_prefix + name, params, **kwargs)
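    # Payload sketch (illustrative values, not from the A10 API docs): creating
    # "sg1" over TCP with the default round-robin method posts roughly
    #     {"service-group": {"name": "sg1", "protocol": "tcp",
    #                        "health-check-disable": 0,
    #                        "lb-method": "round-robin"}}
    # to /slb/service-group/ (the name is dropped from the URL on create).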
def create(self, name, protocol=TCP, lb_method=ROUND_ROBIN, **kwargs):
try:
self.get(name)
except acos_errors.NotFound:
pass
else:
raise acos_errors.Exists
self._set(name, protocol, lb_method, **kwargs)
def update(self, name, protocol=None, lb_method=None, health_monitor=None,
**kwargs):
self._set(name, protocol, lb_method,
health_monitor, update=True, **kwargs)
def delete(self, name):
self._delete(self.url_prefix + name)
def stats(self, name, *args, **kwargs):
return self._get(self.url_prefix + name + "/stats", **kwargs)
| apache-2.0 | -3,985,094,053,644,181,500 | 34.117117 | 84 | 0.628271 | false | 3.680831 | false | false | false |
pombredanne/dGit | setup.py | 1 | 1762 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
APP_NAME = 'legit'
APP_SCRIPT = './legit_r'
VERSION = '0.1.0'
# Grab requirements.
with open('reqs.txt') as f:
required = f.readlines()
settings = dict()
# Publish Helper.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
# Build Helper.
if sys.argv[-1] == 'build':
try:
import py2exe
except ImportError:
print 'py2exe is required to continue.'
sys.exit(1)
sys.argv.append('py2exe')
settings.update(
console=[{'script': APP_SCRIPT}],
zipfile = None,
options = {
'py2exe': {
'compressed': 1,
'optimize': 0,
'bundle_files': 1}})
settings.update(
name=APP_NAME,
version=VERSION,
description='Sexy Git CLI, Inspired by GitHub for Mac.',
long_description=open('README.rst').read(),
author='Kenneth Reitz',
author_email='[email protected]',
url='https://github.com/kennethreitz/legit',
packages= ['legit',],
install_requires=required,
license='BSD',
classifiers=(
# 'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
# 'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
),
entry_points={
'console_scripts': [
'legit = legit.cli:main',
],
}
)
setup(**settings)
| bsd-3-clause | 1,204,772,857,594,329,300 | 20.753086 | 60 | 0.575482 | false | 3.640496 | false | false | false |
liorvh/Empire | lib/modules/situational_awareness/host/computerdetails.py | 19 | 4385 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-ComputerDetails',
'Author': ['@JosephBialek'],
'Description': ('Enumerates useful information on the system. By default, all checks are run.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Recon/Get-ComputerDetails.ps1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'4648' : {
'Description' : 'Switch. Only return 4648 logon information (RDP to another machine).',
'Required' : False,
'Value' : ''
},
'4624' : {
'Description' : 'Switch. Only return 4624 logon information (logons to this machine).',
'Required' : False,
'Value' : ''
},
'AppLocker' : {
'Description' : 'Switch. Only return AppLocker logs.',
'Required' : False,
'Value' : ''
},
'PSScripts' : {
'Description' : 'Switch. Only return PowerShell scripts run from operational log.',
'Required' : False,
'Value' : ''
},
'SavedRDP' : {
'Description' : 'Switch. Only return saved RDP connections.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/host/Get-ComputerDetails.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if option == "4648":
script += "$SecurityLog = Get-EventLog -LogName Security;$Filtered4624 = Find-4624Logons $SecurityLog;Write-Output $Filtered4624.Values | Format-List"
return script
if option == "4624":
script += "$SecurityLog = Get-EventLog -LogName Security;$Filtered4648 = Find-4648Logons $SecurityLog;Write-Output $Filtered4648.Values | Format-List"
return script
if option == "AppLocker":
script += "$AppLockerLogs = Find-AppLockerLogs;Write-Output $AppLockerLogs.Values | Format-List"
return script
if option == "PSLogs":
script += "$PSLogs = Find-PSScriptsInPSAppLog;Write-Output $PSLogs.Values | Format-List"
return script
if option == "SavedRDP":
script += "$RdpClientData = Find-RDPClientConnections;Write-Output $RdpClientData.Values | Format-List"
return script
        # if we get to this point, no switches were specified
return script + "Get-ComputerDetails -ToString"
| bsd-3-clause | -70,047,547,855,070,470 | 38.151786 | 174 | 0.489852 | false | 4.50668 | false | false | false |
jnishi/chainer | tests/chainerx_tests/unit_tests/routines_tests/test_math.py | 2 | 21599 | import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_negative(xp, device, shape, dtype, is_module):
if dtype == 'bool_': # Checked in test_invalid_bool_neg
return chainerx.testing.ignore()
x = array_utils.create_dummy_ndarray(xp, shape, dtype)
if is_module:
return xp.negative(x)
else:
return -x
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DtypeError, TypeError))
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_negative_invalid_bool(xp, device, is_module):
x = xp.array([True, False], dtype='bool_')
if is_module:
xp.negative(x)
else:
-x
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_add(xp, device, shape, dtype, is_module):
lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1)
rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2)
if is_module:
return xp.add(lhs, rhs)
else:
return lhs + rhs
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_iadd(xp, device, shape, dtype):
lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1)
rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2)
lhs += rhs
return lhs
@pytest.mark.parametrize('scalar', [0, -1, 1, 2])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_add_scalar(scalar, device, shape, dtype):
x_np = array_utils.create_dummy_ndarray(numpy, shape, dtype)
    # Implicit casting in NumPy's add depends on the 'casting' argument,
    # which is not yet supported (ChainerX always casts).
    # Therefore, we explicitly cast the scalar to the dtype of the ndarray
    # before the addition for NumPy.
expected = x_np + numpy.dtype(dtype).type(scalar)
x = chainerx.array(x_np)
scalar_chx = chainerx.Scalar(scalar, dtype)
chainerx.testing.assert_array_equal_ex(x + scalar, expected)
chainerx.testing.assert_array_equal_ex(x + scalar_chx, expected)
chainerx.testing.assert_array_equal_ex(scalar + x, expected)
chainerx.testing.assert_array_equal_ex(scalar_chx + x, expected)
chainerx.testing.assert_array_equal_ex(chainerx.add(x, scalar), expected)
chainerx.testing.assert_array_equal_ex(
chainerx.add(x, scalar_chx), expected)
chainerx.testing.assert_array_equal_ex(chainerx.add(scalar, x), expected)
chainerx.testing.assert_array_equal_ex(
chainerx.add(scalar_chx, x), expected)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('scalar', [0, -1, 1, 2])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_iadd_scalar(xp, scalar, device, shape, dtype):
lhs = array_utils.create_dummy_ndarray(xp, shape, dtype)
rhs = scalar
if xp is numpy:
rhs = numpy.dtype(dtype).type(rhs)
lhs += rhs
return lhs
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_sub(xp, device, shape, numeric_dtype, is_module):
lhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=1)
rhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=2)
if is_module:
return xp.subtract(lhs, rhs)
else:
return lhs - rhs
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_isub(xp, device, shape, numeric_dtype):
lhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=1)
rhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=2)
lhs -= rhs
return lhs
@pytest.mark.parametrize('scalar', [0, -1, 1, 2])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_sub_scalar(scalar, device, shape, dtype):
if dtype == 'bool_':
# Boolean subtract is deprecated.
return chainerx.testing.ignore()
x_np = array_utils.create_dummy_ndarray(numpy, shape, dtype)
    # Implicit casting in NumPy's subtract depends on the 'casting' argument,
    # which is not yet supported (ChainerX always casts).
    # Therefore, we explicitly cast the scalar to the dtype of the ndarray
    # before the subtraction for NumPy.
expected = x_np - numpy.dtype(dtype).type(scalar)
expected_rev = numpy.dtype(dtype).type(scalar) - x_np
x = chainerx.array(x_np)
scalar_chx = chainerx.Scalar(scalar, dtype)
chainerx.testing.assert_array_equal_ex(x - scalar, expected)
chainerx.testing.assert_array_equal_ex(x - scalar_chx, expected)
chainerx.testing.assert_array_equal_ex(scalar - x, expected_rev)
chainerx.testing.assert_array_equal_ex(scalar_chx - x, expected_rev)
chainerx.testing.assert_array_equal_ex(
chainerx.subtract(x, scalar), expected)
chainerx.testing.assert_array_equal_ex(
chainerx.subtract(x, scalar_chx), expected)
chainerx.testing.assert_array_equal_ex(
chainerx.subtract(scalar, x), expected_rev)
chainerx.testing.assert_array_equal_ex(
chainerx.subtract(scalar_chx, x), expected_rev)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('scalar', [0, -1, 1, 2])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_isub_scalar(xp, scalar, device, shape, dtype):
if dtype == 'bool_':
# Boolean subtract is deprecated.
return chainerx.testing.ignore()
lhs = array_utils.create_dummy_ndarray(xp, shape, dtype)
rhs = scalar
if xp is numpy:
rhs = numpy.dtype(dtype).type(rhs)
lhs -= rhs
return lhs
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_mul(xp, device, shape, dtype, is_module):
lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1)
rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2)
if is_module:
return xp.multiply(lhs, rhs)
else:
return lhs * rhs
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_imul(xp, device, shape, dtype):
lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1)
rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2)
lhs *= rhs
return lhs
@pytest.mark.parametrize('scalar', [0, -1, 1, 2])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_mul_scalar(scalar, device, shape, dtype):
x_np = array_utils.create_dummy_ndarray(numpy, shape, dtype)
# Implicit casting in NumPy's multiply depends on the 'casting' argument,
# which is not yet supported (ChainerX always casts).
# Therefore, we explicitly cast the scalar to the dtype of the ndarray
# before the multiplication for NumPy.
expected = x_np * numpy.dtype(dtype).type(scalar)
x = chainerx.array(x_np)
scalar_chx = chainerx.Scalar(scalar, dtype)
chainerx.testing.assert_array_equal_ex(x * scalar, expected)
chainerx.testing.assert_array_equal_ex(x * scalar_chx, expected)
chainerx.testing.assert_array_equal_ex(scalar * x, expected)
chainerx.testing.assert_array_equal_ex(scalar_chx * x, expected)
chainerx.testing.assert_array_equal_ex(
chainerx.multiply(x, scalar), expected)
chainerx.testing.assert_array_equal_ex(
chainerx.multiply(x, scalar_chx), expected)
chainerx.testing.assert_array_equal_ex(
chainerx.multiply(scalar, x), expected)
chainerx.testing.assert_array_equal_ex(
chainerx.multiply(scalar_chx, x), expected)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('scalar', [0, -1, 1, 2])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_imul_scalar(xp, scalar, device, shape, dtype):
lhs = array_utils.create_dummy_ndarray(xp, shape, dtype)
rhs = scalar
if xp is numpy:
rhs = numpy.dtype(dtype).type(rhs)
lhs *= rhs
return lhs
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_truediv(xp, device, shape, numeric_dtype, is_module):
lhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype)
rhs = xp.arange(1, lhs.size + 1, dtype=numeric_dtype).reshape(shape)
# TODO(beam2d): Remove astype after supporting correct dtype promotion.
if is_module:
return xp.divide(lhs, rhs).astype(numeric_dtype)
else:
return (lhs / rhs).astype(numeric_dtype)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_itruediv(xp, device, shape, numeric_dtype):
# TODO(niboshi): Remove padding=False
lhs = array_utils.create_dummy_ndarray(
xp, shape, numeric_dtype, padding=False)
rhs = xp.arange(1, lhs.size + 1, dtype=numeric_dtype).reshape(shape)
# TODO(beam2d): Fix after supporting correct dtype promotion.
if xp is numpy and 'int' in numeric_dtype:
# NumPy does not support itruediv to integer arrays.
lhs = (lhs / rhs).astype(numeric_dtype)
else:
lhs /= rhs
return lhs
# TODO(hvy): Support and test zero division and mixed dtypes (dtype kinds).
# TODO(hvy): Support and test chainerx.Scalar / chainerx.ndarray.
@pytest.mark.parametrize('scalar', [1, 2])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_truediv_scalar(scalar, device, shape, numeric_dtype):
x_np = array_utils.create_dummy_ndarray(numpy, shape, numeric_dtype)
if 'int' in numeric_dtype:
# NumPy does not support itruediv to integer arrays.
expected = (x_np / scalar).astype(numeric_dtype)
else:
expected = x_np / scalar
x = chainerx.array(x_np)
scalar_chx = chainerx.Scalar(scalar, numeric_dtype)
chainerx.testing.assert_array_equal_ex(x / scalar, expected)
chainerx.testing.assert_array_equal_ex(x / scalar_chx, expected)
chainerx.testing.assert_array_equal_ex(
chainerx.divide(x, scalar), expected)
chainerx.testing.assert_array_equal_ex(
chainerx.divide(x, scalar_chx), expected)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('scalar', [1, 2])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_itruediv_scalar(xp, scalar, device, shape, numeric_dtype):
# TODO(niboshi): Remove padding=False
lhs = array_utils.create_dummy_ndarray(
xp, shape, numeric_dtype, padding=False)
rhs = scalar
# TODO(hvy): Fix after supporting correct dtype promotion.
if xp is numpy and 'int' in numeric_dtype:
# NumPy does not support itruediv to integer arrays.
lhs = (lhs / rhs).astype(numeric_dtype)
else:
lhs /= rhs
return lhs
# TODO(niboshi): Remove strides_check=False
@chainerx.testing.numpy_chainerx_array_equal(strides_check=False)
@pytest.mark.parametrize('keepdims', [False, True])
@pytest.mark.parametrize('shape,axis', [
((), None),
((), ()),
((2,), None),
((2,), ()),
((2,), 0),
((2,), (0,)),
((2,), (-1,)),
((2, 3), None),
((2, 3), ()),
((2, 3), 0),
((2, 3), (0,)),
((2, 3), (1,)),
((2, 3), (-1,)),
((2, 3), (-2,)),
((2, 3), (0, 1)),
((2, 3), (-2, -1)),
((1, 3), None), # sum over 1-dim axis
((0, 3), None), # sum over 0-dim axis
# Sum over axes that are in the middle or apart
((2, 3, 4), (1,)),
((2, 3, 4), (0, 2)),
# Sum over axes that are apart and/or unsorted
((2, 3), (1, 0)),
((2, 3, 4), (2, 0)),
((2, 3, 4), (2, 0, 1)),
((2, 3, 4), (-2, 2, 0)),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_sum(is_module, xp, device, shape, axis, keepdims, dtype):
a = array_utils.create_dummy_ndarray(xp, shape, dtype)
if is_module:
out = xp.sum(a, axis=axis, keepdims=keepdims)
else:
out = a.sum(axis=axis, keepdims=keepdims)
# TODO(niboshi): Unsigned integer dtypes should result in uint64.
# Currently chainerx returns int64.
if xp is numpy and numpy.dtype(dtype).kind == 'u':
out = out.astype(numpy.int64)
return out
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('keepdims', [False, True])
@pytest.mark.parametrize('shape,axis', [
# ((), 0), # TODO(sonots): Fix compatibility
((), 1),
((), (1,)),
((2,), 2),
((2,), (2,)),
((2,), (-2,)),
((2, 3,), (-3,)),
((2, 3,), (-3, -4)),
((2, 3,), (0, 0)),
((2, 3,), (-1, -1)),
((2, 3,), (0, 1, 1)),
((2, 3,), (0, -2)),
])
def test_sum_invalid(is_module, xp, shape, axis, keepdims, dtype):
a = array_utils.create_dummy_ndarray(xp, shape, dtype)
if is_module:
xp.sum(a, axis=axis, keepdims=keepdims)
else:
a.sum(axis=axis, keepdims=keepdims)
# TODO(sonots): Fix type compatibility for when shape is ()
@chainerx.testing.numpy_chainerx_array_equal(dtype_check=False)
@pytest.mark.parametrize("shape,value", [
((), -1),
((), 1),
((1,), -1),
((1,), 1),
((2,), 1),
((2, 3), 3),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_maximum_with_scalar(xp, device, shape, value, signed_dtype):
a = array_utils.create_dummy_ndarray(xp, shape, signed_dtype)
return xp.maximum(a, value)
def _create_dummy_array_for_dot(xp, shape, dtype):
x = numpy.arange(numpy.prod(shape)).reshape(shape)
if dtype == 'bool_':
x = numpy.asarray(x % 2 == 0)
else:
x = x.astype(dtype)
return xp.array(x)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0), numpy.asarray(-4), numpy.asarray(4),
    numpy.asarray(-float('inf')), numpy.asarray(float('inf')),
    numpy.asarray(float('nan')),
numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
# TODO(niboshi): Dtype promotion is not supported yet.
def test_exp(xp, device, input, float_dtype):
dtype = float_dtype
a = xp.array(input.astype(dtype))
return xp.exp(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
    numpy.asarray(0), numpy.asarray(-1), numpy.asarray(1), numpy.asarray(10),
    numpy.asarray(float('inf')), numpy.asarray(float('nan')),
numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
# TODO(niboshi): Dtype promotion is not supported yet.
def test_log(xp, device, input, float_dtype):
dtype = float_dtype
a = xp.array(input.astype(dtype))
return xp.log(a)
_logsumexp_params = [
((2,), 0),
((2,), -1),
((2, 3), None),
((2, 3), 0),
((2, 3), 1),
((2, 3), -2),
((2, 3), (0, 1)),
((2, 3), (-2, 1)),
((1, 2, 3), None),
((1, 2, 3), (1)),
((1, 2, 3), (1, 0)),
((1, 2, 3), (0, 1, 2)),
]
_invalid_logsumexp_params = [
# Axis out of bounds
((2,), 1),
((2,), -2),
((2,), (0, 1)),
((2, 3), (0, 1, 2)),
# Duplicate axes
((2,), (0, 0)),
((2, 3), (0, 0)),
]
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _logsumexp_params)
@pytest.mark.parametrize('keepdims', [True, False])
@chainerx.testing.numpy_chainerx_allclose(rtol=1e-7, atol=0, dtype_check=False)
# TODO(hvy): Dtype promotion is not supported yet.
def test_logsumexp(xp, device, a_shape, axis, float_dtype, keepdims):
a = array_utils.create_dummy_ndarray(xp, a_shape, float_dtype)
if xp is numpy:
return xp.log(xp.sum(xp.exp(a), axis=axis, keepdims=keepdims))
return xp.logsumexp(a, axis=axis, keepdims=keepdims)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params)
@pytest.mark.parametrize('keepdims', [True, False])
# TODO(hvy): Dtype promotion is not supported yet.
# TODO(hvy): Should not overflow for large numbers, add tests
def test_logsumexp_invalid(device, a_shape, axis, float_dtype, keepdims):
a = array_utils.create_dummy_ndarray(chainerx, a_shape, float_dtype)
with pytest.raises(chainerx.DimensionError):
chainerx.logsumexp(a, axis=axis, keepdims=keepdims)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _logsumexp_params)
@chainerx.testing.numpy_chainerx_allclose(
rtol=1e-7, atol=1e-5, dtype_check=False)
# TODO(hvy): Dtype promotion is not supported yet.
def test_log_softmax(xp, device, a_shape, axis, float_dtype):
a = array_utils.create_dummy_ndarray(xp, a_shape, float_dtype)
if xp is numpy:
# Default is the second axis
axis = axis if axis is not None else 1
return a - xp.log(xp.sum(xp.exp(a), axis=axis, keepdims=True))
return xp.log_softmax(a, axis=axis)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params)
# TODO(hvy): Dtype promotion is not supported yet.
def test_log_softmax_invalid(device, a_shape, axis, float_dtype):
a = array_utils.create_dummy_ndarray(chainerx, a_shape, float_dtype)
with pytest.raises(chainerx.DimensionError):
return chainerx.log_softmax(a, axis=axis)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0), numpy.asarray(-4), numpy.asarray(4),
    numpy.asarray(-float('inf')), numpy.asarray(float('inf')),
    numpy.asarray(float('nan')),
numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
# TODO(hamaji): Dtype promotion is not supported yet.
def test_sqrt(xp, device, input, float_dtype):
dtype = float_dtype
a = xp.array(input.astype(dtype))
return xp.sqrt(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
    numpy.asarray(0), numpy.asarray(-1), numpy.asarray(1), numpy.asarray(10),
    numpy.asarray(float('inf')), numpy.asarray(float('nan')),
numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
# TODO(hamaji): Dtype promotion is not supported yet.
def test_tanh(xp, device, input, float_dtype):
dtype = float_dtype
a = xp.array(input.astype(dtype))
return xp.tanh(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
    numpy.asarray(0), numpy.asarray(-1), numpy.asarray(10),
    numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
    numpy.asarray(float('nan')),
    numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
def test_isnan(xp, device, input, float_dtype):
dtype = float_dtype
a = xp.array(input.astype(dtype))
return xp.isnan(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
    numpy.asarray(0), numpy.asarray(-1), numpy.asarray(10),
    numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
    numpy.asarray(float('nan')),
    numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
def test_isinf(xp, device, input, float_dtype):
dtype = float_dtype
a = xp.array(input.astype(dtype))
return xp.isinf(a)
def test_max_amax():
assert chainerx.amax is chainerx.max
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(ValueError, chainerx.DimensionError), strides_check=False)
@pytest.mark.parametrize('input,axis', [
# --- single axis
# input, axis
# valid params
(numpy.asarray(0), None),
(numpy.asarray(-1), None),
(numpy.asarray(float('inf')), None),
(numpy.asarray(float('nan')), None),
(numpy.asarray(-float('inf')), None),
(numpy.asarray([4, 1, 4, 1]), None),
(numpy.asarray([4, 1, 4, 1]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]).T, 1),
(numpy.asarray([-0.0, +0.0, +0.0, -0.0]), None),
(numpy.asarray([[True, True, False, False],
[True, False, True, False]]), 0),
(numpy.ones((2, 0, 3)), 2),
(numpy.ones((2, 3)), 1),
(numpy.ones((2, 3)), -2),
# invalid params
(numpy.ones((0,)), None),
(numpy.ones((2, 0, 3)), 1),
(numpy.ones((2, 0, 3)), None),
(numpy.ones((2, 3)), 2),
(numpy.ones((2, 3)), -3),
# --- multiple axes
# input, axis
# valid params
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (0, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (-2, -1)),
# invalid params
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (-3, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 2)),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
# TODO(niboshi): Remove strides_check=False
def test_max(is_module, xp, device, input, axis, dtype):
try:
a_np = input.astype(dtype)
except (ValueError, OverflowError):
return xp.zeros(()) # invalid combination of data and dtype
a = xp.array(a_np)
if is_module:
return xp.max(a, axis)
else:
return a.max(axis)
| mit | 2,352,796,551,604,239,000 | 35.732993 | 79 | 0.638965 | false | 3.094413 | true | false | false |
briancurtin/rackspace-sdk-plugin | rackspace/monitoring/v1/notification.py | 2 | 1784 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
from openstack import utils
from rackspace.monitoring import monitoring_service
class Notification(resource.Resource):
base_path = 'notifications'
resources_key = 'values'
service = monitoring_service.MonitoringService()
# capabilities
allow_create = True
allow_delete = True
allow_list = True
allow_retrieve = True
allow_update = True
# Properties
#: Details specific to the notification. *Type: dict*
details = resource.prop('details', type=dict)
#: A friendly label for the notification type
name = resource.prop('label')
#: The type of notification to send
type = resource.prop('type')
def test(self, session):
"""Test an existing notification
The notification comes from the same server that the alert messages
come from. One use for this test is to verify that your firewall is
configured properly.
:param session: The session to use for making this request.
:type session: :class:`~openstack.session.Session`
:returns: ``dict``
"""
url = utils.urljoin(self.base_path, self.id, 'test')
return session.post(url, endpoint_filter=self.service).body
| apache-2.0 | 7,101,806,672,857,766,000 | 34.68 | 75 | 0.704036 | false | 4.415842 | false | false | false |
thomasvs/pychecker | pychecker/function.py | 4 | 8717 | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Copyright (c) 2001-2002, MetaSlash Inc. All rights reserved.
"""
Object to hold information about functions.
Also contain a pseudo Python function object
"""
import string
_ARGS_ARGS_FLAG = 4
_KW_ARGS_FLAG = 8
_CO_FLAGS_MASK = _ARGS_ARGS_FLAG + _KW_ARGS_FLAG
class _ReturnValues:
"""
I am a base class that can track return values.
@ivar returnValues: tuple of (line number, stack item,
index to next instruction)
@type returnValues: tuple of (int, L{pychecker.Stack.Item}, int)
"""
def __init__(self):
self.returnValues = None
def returnsNoValue(self):
returnValues = self.returnValues
# if unset, we don't know
if returnValues is None:
return 0
# it's an empty list, that means no values
if not returnValues:
return 1
        # if any return value is not None, the function does return a value
        for rv in returnValues:
            if not rv[1].isNone():
                return 0
        # all return values are None; report "no value" only when the final
        # return is an implicit one
        return returnValues[-1][1].isImplicitNone()
class FakeCode :
"This is a holder class for code objects (so we can modify them)"
def __init__(self, code, varnames = None) :
"""
@type code: L{types.CodeType}
"""
for attr in dir(code):
try:
setattr(self, attr, getattr(code, attr))
except:
pass
if varnames is not None:
self.co_varnames = varnames
class FakeFunction(_ReturnValues):
"""
This is a holder class for turning non-scoped code (for example at
module-global level, or generator expressions) into a function.
Pretends to be a normal callable and can be used as constructor argument
to L{Function}
"""
def __init__(self, name, code, func_globals = {}, varnames = None) :
_ReturnValues.__init__(self)
self.func_name = self.__name__ = name
self.func_doc = self.__doc__ = "ignore"
self.func_code = FakeCode(code, varnames)
self.func_defaults = None
self.func_globals = func_globals
def __str__(self):
return self.func_name
def __repr__(self):
return '%s from %r' % (self.func_name, self.func_code.co_filename)
class Function(_ReturnValues):
"""
Class to hold all information about a function
@ivar function: the function to wrap
@type function: callable
@ivar isMethod: whether the callable is a method
@type isMethod: int (used as bool)
@ivar minArgs: the minimum number of arguments that should be passed to
this function
@type minArgs: int
@ivar minArgs: the maximum number of arguments that should be passed to
this function, or None in case of *args/unlimited
@type maxArgs: int or None
@ivar supportsKW: whether the function supports keyword arguments.
@type supportsKW: int (used as bool)
"""
def __init__(self, function, isMethod=0):
"""
@param function: the function to wrap
@type function: callable or L{FakeFunction}
@param isMethod: whether the callable is a method
@type isMethod: int (used as bool)
"""
_ReturnValues.__init__(self)
self.function = function
self.isMethod = isMethod
# co_argcount is the number of positional arguments (including
# arguments with default values)
self.minArgs = self.maxArgs = function.func_code.co_argcount
# func_defaults is a tuple containing default argument values for those
# arguments that have defaults, or None if no arguments have a default
# value
if function.func_defaults is not None:
self.minArgs = self.minArgs - len(function.func_defaults)
# if function uses *args, there is no max # args
try:
# co_flags is an integer encoding a number of flags for the
# interpreter.
if function.func_code.co_flags & _ARGS_ARGS_FLAG != 0:
self.maxArgs = None
self.supportsKW = function.func_code.co_flags & _KW_ARGS_FLAG
except AttributeError:
# this happens w/Zope
self.supportsKW = 0
def __str__(self):
return self.function.func_name
def __repr__(self):
# co_filename is the filename from which the code was compiled
# co_firstlineno is the first line number of the function
return '<%s from %r:%d>' % (self.function.func_name,
self.function.func_code.co_filename,
self.function.func_code.co_firstlineno)
def arguments(self):
"""
@returns: a list of argument names to this function
@rtype: list of str
"""
# see http://docs.python.org/reference/datamodel.html#types
# for more info on func_code
# co_argcount is the number of positional arguments (including
# arguments with default values)
numArgs = self.function.func_code.co_argcount
if self.maxArgs is None:
# co_varnames has the name of the *args variable after the
# positional arguments
numArgs = numArgs + 1
if self.supportsKW:
# co_varnames has the name of the **kwargs variable after the
# positional arguments and *args variable
numArgs = numArgs + 1
# co_varnames is a tuple containing the names of the local variables
# (starting with the argument names)
# FIXME: a generator seems to have .0 as the first member here,
# and then the generator variable as the second.
# should we special-case that here ?
return self.function.func_code.co_varnames[:numArgs]
def isParam(self, name):
"""
@type name: str
@returns: Whether the given name is the name of an argument to the
function
@rtype: bool
"""
return name in self.arguments()
def isStaticMethod(self):
return self.isMethod and isinstance(self.function, type(create_fake))
def isClassMethod(self):
try:
return self.isMethod and self.function.im_self is not None
except AttributeError:
return 0
def defaultValue(self, name):
"""
@type name: str
@returns: the default value for the function parameter with the given
name.
"""
func_code = self.function.func_code
arg_names = list(func_code.co_varnames[:func_code.co_argcount])
i = arg_names.index(name)
if i < self.minArgs:
raise ValueError
return self.function.func_defaults[i - self.minArgs]
def varArgName(self):
"""
@returns: the name of the *args parameter of the function.
@rtype: str
"""
if self.maxArgs is not None:
return None
func_code = self.function.func_code
return func_code.co_varnames[func_code.co_argcount]
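# Illustrative example -- not part of the original module -- showing what the
# introspection above reports for a simple function:
#
#     >>> def sample(a, b=1, *args, **kw):
#     ...     pass
#     >>> f = Function(sample)
#     >>> f.minArgs, f.maxArgs, bool(f.supportsKW)
#     (1, None, True)
#     >>> f.arguments()
#     ('a', 'b', 'args', 'kw')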
def create_fake(name, code, func_globals = {}, varnames = None) :
return Function(FakeFunction(name, code, func_globals, varnames))
def create_from_file(file, filename, module):
"""
@type filename: str
@returns: a function that represents the __main__ entry point, if
there was a file
@rtype: L{Function}
"""
if file is None:
return create_fake(filename, compile('', filename, 'exec'))
# Make sure the file is at the beginning
# if python compiled the file, it will be at the end
file.seek(0)
# Read in the source file, see py_compile.compile() for games w/src str
codestr = file.read()
codestr = string.replace(codestr, "\r\n", "\n")
codestr = string.replace(codestr, "\r", "\n")
if codestr and codestr[-1] != '\n':
codestr = codestr + '\n'
code = compile(codestr, filename, 'exec')
return Function(FakeFunction('__main__', code, module.__dict__))
def _co_flags_equal(o1, o2) :
return (o1.co_flags & _CO_FLAGS_MASK) == (o2.co_flags & _CO_FLAGS_MASK)
def same_signature(func, object) :
    '''Return whether <func> has the same signature as a function with the
    same name in <object> (i.e., an overridden method)'''
try :
baseMethod = getattr(object, func.func_name)
base_func_code = baseMethod.im_func.func_code
except AttributeError :
return 1
return _co_flags_equal(base_func_code, func.func_code) and \
base_func_code.co_argcount == func.func_code.co_argcount
| bsd-3-clause | 1,329,208,620,123,719,700 | 34.008032 | 79 | 0.601354 | false | 4.048769 | false | false | false |
drougge/wellpapp-pyclient | wellpapp/shell/replace.py | 1 | 2031 | from __future__ import print_function
from os.path import lexists, realpath
from os import unlink, rename, symlink, stat
from optparse import OptionParser
from hashlib import md5
from PIL import Image, PngImagePlugin
from wellpapp import Client, make_pdirs
def main(arg0, argv):
p = OptionParser(usage="Usage: %prog [-t] post-spec new-file", prog=arg0)
p.add_option("-t", "--regenerate-thumbs",
action="store_true",
help="Regenerate thumbnails from new-file"
)
opts, args = p.parse_args(argv)
if len(args) != 2:
p.print_help()
return 1
client = Client()
oldfile, newfile = args
m = client.postspec2md5(oldfile)
post = client.get_post(m, wanted=(["ext", "rotate"]))
if not post:
print("Post not found")
return 1
data = open(newfile, "rb").read()
newm = md5(data).hexdigest()
mtime = stat(newfile).st_mtime
if client.get_post(newm, wanted=()):
print("New file already has post")
return 1
path = client.image_path(newm)
if lexists(path):
unlink(path)
make_pdirs(path)
symlink(realpath(newfile), path)
if opts.regenerate_thumbs:
# @@ assumes same ext
client.save_thumbs(newm, None, post.ext, post.rotate, True)
else:
meta = PngImagePlugin.PngInfo()
meta.add_text("Thumb::URI", str(newm + "." + post.ext), 0)
meta.add_text("Thumb::MTime", str(int(mtime)), 0)
sizes = list(map(int, client.cfg.thumb_sizes.split())) + ["normal", "large"]
for z in sizes:
if isinstance(z, int):
oldpath = client.thumb_path(m, z)
if opts.regenerate_thumbs:
unlink(oldpath)
else:
newpath = client.thumb_path(newm, z)
make_pdirs(newpath)
rename(oldpath, newpath)
else:
oldpath = client.pngthumb_path(m, post.ext, z)
if opts.regenerate_thumbs:
unlink(oldpath)
else:
t = Image.open(oldpath)
t.load()
newpath = client.pngthumb_path(newm, post.ext, z)
make_pdirs(newpath)
t.save(newpath, format="PNG", pnginfo=meta)
client.modify_post(m, MD5=newm)
path = client.image_path(m)
if lexists(path):
unlink(path)
| mit | 4,836,119,202,779,208,000 | 28.014286 | 77 | 0.670606 | false | 2.748309 | false | false | false |
fresskarma/tinyos-1.x | contrib/handhelds/tools/scripts/common.py | 2 | 4032 | #!/usr/bin/python
'''
Copyright (c) 2005 Hewlett-Packard Company
All rights reserved
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the Hewlett-Packard Company nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Common tokenizing and tree stuff
February 2005
Andrew Christian
'''
#####################################################
class Token:
def __init__(self, type, attr=None, lineno=-1):
self.type = type
self.attr = attr
self.lineno = lineno
def __cmp__(self, o):
return cmp(self.type, o)
def __repr__(self):
if self.attr is not None:
return "%s token on line %d" % (self.attr, self.lineno)
return "%s token on line %d" % (self.type, self.lineno)
# __getitem__ only if you have heterogeneous ASTs
#def __getitem__(self, i):
# raise IndexError
class AST:
def __init__(self, type):
self.type = type
self._kids = []
self.lineno = -1
def __getitem__(self, i):
return self._kids[i]
def __len__(self):
return len(self._kids)
def __setslice__(self, low, high, seq):
self._kids[low:high] = seq
def __cmp__(self, o):
return cmp(self.type, o)
def __repr__(self):
if hasattr(self,'attr') and self.attr is not None:
return "%s token on line %d" % (self.attr, self.lineno)
return "%s token on line %d" % (self.type, self.lineno)
#####################################################
def tokenize_by_line(scanner,filename):
'''Parse a data file with a line-by-line scanner
Pass the class of the scanner and the filename
Returns a token list
'''
fd = open(filename)
tlist = []
input = fd.readline()
lineno = 1
while input:
bs = scanner(lineno)
tlist += bs.tokenize(input)
lineno += 1
input = fd.readline()
fd.close()
return tlist
def parse_tokens(parser,tlist):
p = parser(AST)
atree = p.parse(tlist)
return atree
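# Illustrative usage -- not part of the original file.  `MyScanner` and
# `MyParser` are hypothetical classes matching the interfaces assumed above
# (a scanner is constructed with a line number and provides tokenize(line);
# a parser is constructed with the AST class and provides parse(token_list)):
#
#     tlist = tokenize_by_line(MyScanner, 'input.dat')
#     dump_token_list(tlist)
#     atree = parse_tokens(MyParser, tlist)
#     dump_ast(atree)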
#####################################################
import re
def dump_token_list(tlist):
foo = re.compile('\n')
for t in tlist:
if t.attr:
print t.lineno, "TOKEN %s '%s'" % (t.type, foo.sub('.',t.attr))
else:
print t.lineno, "TOKEN", t.type
def dump_ast(atree,depth=0):
foo = re.compile('\n')
if hasattr(atree,'attr') and atree.attr is not None:
a = atree.attr
if type(a) is str: a = foo.sub('.',a)
print " " * depth, atree.type, a
else:
print " " * depth, atree.type
try:
for k in atree:
dump_ast(k,depth+1)
except:
pass
| bsd-3-clause | -6,222,359,631,060,490,000 | 29.545455 | 75 | 0.618056 | false | 4.003972 | false | false | false |
jonjesbuzz/cards | python/cards.py | 1 | 1613 | from enum import Enum
from typing import List
from random import randint
class Suit(Enum):
SPADE = 0
CLUB = 1
HEART = 2
DIAMOND = 3
def __str__(self) -> str:
return str(self.name).capitalize()
class Rank(Enum):
ACE = 1
TWO = 2
THREE = 3
FOUR = 4
FIVE = 5
SIX = 6
SEVEN = 7
EIGHT = 8
NINE = 9
TEN = 10
JACK = 11
QUEEN = 12
KING = 13
def __str__(self) -> str:
if self.value >= 2 and self.value <= 10:
return str(self.value)
return str(self.name).capitalize()
class Card:
def __init__(self, suit: Suit, rank: Rank) -> None:
self.suit = suit
self.rank = rank
def __str__(self) -> str:
return "{} of {}".format(self.rank, self.suit)
class Deck:
def __init__(self) -> None:
self.deck = [] # type: List[Card]
for s in list(Suit):
for v in list(Rank):
self.deck.append(Card(s, v))
    def shuffle(self, iterations: int = 1000) -> None:
        # Repeatedly walk the deck, swapping each position with a random
        # different position.
        for i in range(iterations):
            a = i % 52
            while True:
                b = randint(0, 51)
                if a != b:
                    break
            self.deck[a], self.deck[b] = self.deck[b], self.deck[a]
def __str__(self) -> str:
s = ""
for i, card in enumerate(self.deck):
s += str(card)
separator = '\t'
if i % 4 == 3:
separator = '\n'
s += separator
return s
if __name__ == '__main__':
d = Deck()
d.shuffle()
print(d)
| mit | 5,687,092,305,539,702,000 | 21.402778 | 67 | 0.463112 | false | 3.446581 | false | false | false |
zahari/samba | selftest/run.py | 13 | 4305 | #!/usr/bin/python -u
# Bootstrap Samba and run a number of tests against it.
# Copyright (C) 2012 Jelmer Vernooij <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test command running."""
import datetime
from subunit import iso8601
import os
import subprocess
import subunit
import sys
import tempfile
import warnings
# expand strings from %ENV
def expand_environment_strings(s, vars):
# we use a reverse sort so we do the longer ones first
for k in sorted(vars.keys(), reverse=True):
v = vars[k]
s = s.replace("$%s" % k, v)
return s
def expand_command_list(cmd):
if not "$LISTOPT" in cmd:
return None
return cmd.replace("$LISTOPT", "--list")
def expand_command_run(cmd, supports_loadfile, supports_idlist, subtests=None):
"""Expand a test command.
:param cmd: Command to expand
:param supports_loadfile: Whether command supports loadfile
:param supports_idlist: Whether the command supports running specific
subtests
:param subtests: List of subtests to run - None for all subtests
:return: Tuple with command to run and temporary file to remove after
running (or None)
"""
# Generate a file with the individual tests to run, if the
# test runner for this test suite supports it.
if subtests is None:
return (cmd.replace("$LOADLIST", ""), None)
if supports_loadfile:
(fd, listid_file) = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
try:
for test in subtests:
f.write(test+"\n")
finally:
f.close()
return (
cmd.replace("$LOADLIST", "--load-list=%s" % listid_file),
listid_file)
elif supports_idlist:
cmd += " " + " ".join(subtests)
return (cmd, None)
else:
warnings.warn(
"Running subtests requested, but command does not support "
"this.")
return (cmd, None)
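# Illustrative example -- not part of the original file.  A runner that accepts
# an id list but no load file simply gets the subtests appended to the command:
#
#     >>> expand_command_run("smbtorture //server/share", False, True,
#     ...                    subtests=["base.lock", "base.rename"])
#     ('smbtorture //server/share base.lock base.rename', None)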
def exported_envvars_str(vars, names):
out = ""
for n in names:
if not n in vars:
continue
out += "%s=%s\n" % (n, vars[n])
return out
def now():
"""Return datetime instance for current time in UTC.
"""
return datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
def run_testsuite_command(name, cmd, subunit_ops, env=None, outf=None):
"""Run a testsuite command.
:param name: Name of the testsuite
:param cmd: Command to run
:param subunit_ops: Subunit ops to use for reporting results
:param env: Environment the test is run in
:param outf: File-like object to write standard out to (defaults to sys.stdout)
:return: Exit code or None if the test failed to run completely
"""
if outf is None:
outf = sys.stdout
subunit_ops.start_testsuite(name)
subunit_ops.progress(None, subunit.PROGRESS_PUSH)
subunit_ops.time(now())
try:
exitcode = subprocess.call(cmd, shell=True, stdout=outf)
except Exception, e:
subunit_ops.time(now())
subunit_ops.progress(None, subunit.PROGRESS_POP)
subunit_ops.end_testsuite(name, "error", "Unable to run %r: %s" % (cmd, e))
return None
subunit_ops.time(now())
subunit_ops.progress(None, subunit.PROGRESS_POP)
if env is not None:
envlog = env.get_log()
if envlog != "":
outf.write("envlog: %s\n" % envlog)
outf.write("command: %s\n" % cmd)
outf.write("expanded command: %s\n" % expand_environment_strings(cmd, os.environ))
if exitcode == 0:
subunit_ops.end_testsuite(name, "success")
else:
subunit_ops.end_testsuite(name, "failure", "Exit code was %d" % exitcode)
return exitcode
| gpl-3.0 | 3,631,319,685,478,454,000 | 31.126866 | 86 | 0.648084 | false | 3.72083 | true | false | false |
neuronalX/workshop_cellular_automaton | gray_scott.py | 1 | 2448 | # -----------------------------------------------------------------------------
# From Numpy to Python
# Copyright (2017) Nicolas P. Rougier - BSD license
# More information at https://github.com/rougier/numpy-book
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# Parameters from http://www.aliensaint.com/uo/java/rd/
# -----------------------------------------------------
n = 256
# Du, Dv, F, k = 0.16, 0.08, 0.035, 0.065 # Bacteria 1
# Du, Dv, F, k = 0.14, 0.06, 0.035, 0.065 # Bacteria 2
# Du, Dv, F, k = 0.16, 0.08, 0.060, 0.062 # Coral
# Du, Dv, F, k = 0.19, 0.05, 0.060, 0.062 # Fingerprint
Du, Dv, F, k = 0.10, 0.10, 0.018, 0.050 # Spirals
# Du, Dv, F, k = 0.12, 0.08, 0.020, 0.050 # Spirals Dense
# Du, Dv, F, k = 0.10, 0.16, 0.020, 0.050 # Spirals Fast
# Du, Dv, F, k = 0.16, 0.08, 0.020, 0.055 # Unstable
# Du, Dv, F, k = 0.16, 0.08, 0.050, 0.065 # Worms 1
# Du, Dv, F, k = 0.16, 0.08, 0.054, 0.063 # Worms 2
# Du, Dv, F, k = 0.16, 0.08, 0.035, 0.060 # Zebrafish
Z = np.zeros((n+2, n+2), [('U', np.double),
('V', np.double)])
U, V = Z['U'], Z['V']
u, v = U[1:-1, 1:-1], V[1:-1, 1:-1]
r = 20
u[...] = 1.0
U[n//2-r:n//2+r, n//2-r:n//2+r] = 0.50
V[n//2-r:n//2+r, n//2-r:n//2+r] = 0.25
u += 0.05*np.random.uniform(-1, +1, (n, n))
v += 0.05*np.random.uniform(-1, +1, (n, n))
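# For reference, the update step below integrates the Gray-Scott
# reaction-diffusion equations with an explicit Euler step and a 5-point
# Laplacian stencil:
#     du/dt = Du*lap(u) - u*v^2 + F*(1 - u)
#     dv/dt = Dv*lap(v) + u*v^2 - (F + k)*v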
def update(frame):
global U, V, u, v, im
for i in range(10):
Lu = ( U[0:-2, 1:-1] +
U[1:-1, 0:-2] - 4*U[1:-1, 1:-1] + U[1:-1, 2:] +
U[2: , 1:-1])
Lv = ( V[0:-2, 1:-1] +
V[1:-1, 0:-2] - 4*V[1:-1, 1:-1] + V[1:-1, 2:] +
V[2: , 1:-1])
uvv = u*v*v
u += (Du*Lu - uvv + F*(1-u))
v += (Dv*Lv + uvv - (F+k)*v)
im.set_data(V)
im.set_clim(vmin=V.min(), vmax=V.max())
fig = plt.figure(figsize=(4, 4))
fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
im = plt.imshow(V, interpolation='bicubic', cmap=plt.cm.viridis)
plt.xticks([]), plt.yticks([])
animation = FuncAnimation(fig, update, interval=10, frames=2000)
# animation.save('gray-scott-1.mp4', fps=40, dpi=80, bitrate=-1, codec="libx264",
# extra_args=['-pix_fmt', 'yuv420p'],
# metadata={'artist':'Nicolas P. Rougier'})
plt.show()
| mit | 4,542,996,697,094,777,000 | 36.661538 | 81 | 0.454657 | false | 2.374394 | false | false | false |
listen1/listen1 | listen1/handlers/playlist.py | 1 | 5556 | # coding=utf8
import logging
from handlers.base import BaseHandler
from models.playlist import PlaylistManager
from replay import get_provider, get_provider_list
logger = logging.getLogger('listenone.' + __name__)
class ShowPlaylistHandler(BaseHandler):
def get(self):
source = self.get_argument('source', '0')
provider_list = get_provider_list()
index = int(source)
if index >= 0 and index < len(provider_list):
provider = provider_list[index]
playlist = provider.list_playlist()
else:
playlist = []
result = dict(result=playlist)
self.write(result)
class PlaylistHandler(BaseHandler):
def get(self):
list_id = self.get_argument('list_id', '')
if list_id.startswith('my_'):
playlist = PlaylistManager.shared_instance().get_playlist(list_id)
info = dict(
cover_img_url=playlist['cover_img_url'],
title=playlist['title'], id=playlist['id'])
result = dict(
status='1', tracks=playlist['tracks'], info=info, is_mine='1')
else:
provider = get_provider(list_id)
item_id = list_id.split('_')[1]
result = provider.get_playlist(item_id)
result.update(dict(is_mine='0'))
self.write(result)
class AddMyPlaylistHandler(BaseHandler):
def post(self):
list_id = self.get_argument('list_id', '')
track_id = self.get_argument('id', '')
title = self.get_argument('title', '')
artist = self.get_argument('artist', '')
url = self.get_argument('url', '')
artist_id = self.get_argument('artist_id', '')
album = self.get_argument('album', '')
album_id = self.get_argument('album_id', '')
source = self.get_argument('source', '')
source_url = self.get_argument('source_url', '')
track = {
'id': track_id,
'title': title,
'artist': artist,
'url': url,
'artist_id': artist_id,
'album': album,
'album_id': album_id,
'source': source,
'source_url': source_url,
}
PlaylistManager.shared_instance().add_track_in_playlist(track, list_id)
result = dict(result='success')
self.write(result)
class CreateMyPlaylistHandler(BaseHandler):
def post(self):
list_title = self.get_argument('list_title', '')
track_id = self.get_argument('id', '')
title = self.get_argument('title', '')
artist = self.get_argument('artist', '')
url = self.get_argument('url', '')
artist_id = self.get_argument('artist_id', '')
album = self.get_argument('album', '')
album_id = self.get_argument('album_id', '')
source = self.get_argument('source', '')
source_url = self.get_argument('source_url', '')
track = {
'id': track_id,
'title': title,
'artist': artist,
'url': url,
'artist_id': artist_id,
'album': album,
'album_id': album_id,
'source': source,
'source_url': source_url,
}
newlist_id = PlaylistManager.shared_instance()\
.create_playlist(list_title)
PlaylistManager.shared_instance()\
.add_track_in_playlist(track, newlist_id)
result = dict(result='success')
self.write(result)
class ShowMyPlaylistHandler(BaseHandler):
def get(self):
resultlist = PlaylistManager.shared_instance().\
list_playlist()
result = dict(result=resultlist)
self.write(result)
class ClonePlaylistHandler(BaseHandler):
def post(self):
list_id = self.get_argument('list_id', '')
provider = get_provider(list_id)
if list_id[2:].startswith('album'):
album_id = list_id.split('_')[1]
album = provider.get_album(album_id)
tracks = album['tracks']
info = album['info']
elif list_id[2:].startswith('artist'):
artist_id = list_id.split('_')[1]
artist = provider.get_artist(artist_id)
tracks = artist['tracks']
info = artist['info']
elif list_id[2:].startswith('playlist'):
playlist_id = list_id.split('_')[1]
playlist = provider.get_playlist(playlist_id)
tracks = playlist['tracks']
info = playlist['info']
list_title = info['title']
cover_img_url = info['cover_img_url']
newlist_id = PlaylistManager.shared_instance()\
.create_playlist(list_title, cover_img_url)
for track in tracks:
PlaylistManager.shared_instance()\
.add_track_in_playlist(track, newlist_id)
result = dict(result='success')
self.write(result)
class RemoveTrackHandler(BaseHandler):
def post(self):
track_id = self.get_argument('track_id', '')
list_id = self.get_argument('list_id', '')
PlaylistManager.shared_instance().remove_track_in_playlist(
track_id, list_id)
result = dict(result='success')
PlaylistManager.shared_instance()
self.write(result)
class RemoveMyPlaylistHandler(BaseHandler):
def post(self):
list_id = self.get_argument('list_id', '')
PlaylistManager.shared_instance().remove_playlist(list_id)
result = dict(result='success')
self.write(result)
| mit | 2,191,440,012,835,903,200 | 31.87574 | 79 | 0.561375 | false | 3.901685 | false | false | false |
npetrenko/recurrent_frcnn | rcnn/MOT_parser.py | 1 | 4081 | import cv2
import numpy as np
import os
def read_ini(fpath):
ret = {}
with open(fpath, 'r') as f:
for line in f:
if '=' in line:
                k, v = line.split('=', 1)  # split on the first '=' only, so values may contain '='
v = v.rstrip()
try:
v = int(v)
except:
pass
ret[k] = v
return ret
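# Illustrative example -- not from the original file.  For a seqinfo.ini
# containing lines such as "imExt=.jpg" and "seqLength=600", read_ini returns
# {'imExt': '.jpg', 'seqLength': 600}: values that parse as integers are
# converted, everything else is kept as a string, and lines without '=' (e.g.
# the "[Sequence]" section header) are skipped.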
def get_data(mot_pathes, part='train'):
found_bg = False
all_videos = []
classes_count = {}
class_mapping = {}
visualise = False
datasets = []
for mot_path in mot_pathes:
path = os.path.join(mot_path, part)
datasets += [x for x in map(lambda x: os.path.join(path, x), os.listdir(path)) if not ('/.' in x)]
print('Parsing annotation files')
for dataset in datasets:
try:
form = read_ini(os.path.join(dataset, 'seqinfo.ini'))['imExt'][1:]
except:
form = 'jpg'
try:
sprob = read_ini(os.path.join(dataset, 'seqinfo.ini'))['sampleprob']
except:
sprob = 1
coord_form = 'xywh'
try:
coord_form = read_ini(os.path.join(dataset, 'seqinfo.ini'))['coordform']
except:
pass
frame_path = lambda x: os.path.join(dataset, 'img1', str(x).zfill(6) + '.' + form)
#print(frame_path)
frames = {}
last_frame = -1
first_frame = 1e8
if part == 'train':
bfile = 'gt/gt.txt'
else:
bfile = 'det/det.txt'
with open(os.path.join(dataset, bfile),'r') as f:
for line in f:
line_split = line.strip().split(',')
if part == 'train':
try:
cls = int(line_split[6])
except:
print(line)
print(dataset)
raise
if cls not in [1, 2, 7]:
continue
try:
frameix,x1,y1,w,h = map(lambda x: int(float(x)), line_split[0:1] + line_split[2:6])
except:
print(dataset, line)
raise
if coord_form == 'xywh':
x2 = x1 + w
y2 = y1 + h
else:
x2 = w
y2 = h
class_name = 'bbox'
last_frame = max(frameix, last_frame)
first_frame = min(first_frame, frameix)
if class_name not in classes_count:
classes_count[class_name] = 1
else:
classes_count[class_name] += 1
if class_name not in class_mapping:
class_mapping[class_name] = len(class_mapping)
if not frameix in frames:
frames[frameix] = {}
#print(frame_path.format(frameix))
img = cv2.imread(frame_path(frameix))
try:
(rows,cols) = img.shape[:2]
except:
print(frame_path(frameix), frameix)
frames[frameix]['filepath'] = frame_path(frameix)
frames[frameix]['width'] = cols
frames[frameix]['height'] = rows
frames[frameix]['bboxes'] = []
frames[frameix]['bboxes'].append({'class': class_name, 'x1': int(x1), 'x2': int(x2), 'y1': int(y1), 'y2': int(y2)})
video = []
break_flag = False
for frameix in range(first_frame, last_frame+1):
try:
video.append(frames[frameix])
except:
print('Unable to fetch frames in {}, passing'.format(dataset))
break_flag = True
break
if break_flag:
continue
all_videos.append({'video': video, 'sampleprob': sprob})
return all_videos, classes_count, class_mapping
| apache-2.0 | -6,515,110,188,819,180,000 | 27.943262 | 131 | 0.432002 | false | 4.109768 | false | false | false |
xiaohan2012/capitalization-restoration-train | tests/test_eval_rule_based.py | 1 | 1526 | import numpy as np
from numpy.testing import assert_array_equal
import os
from capitalization_train.evaluate import (eval_rule_based,
is_consistent_prediction,
eval_stat)
from nose.tools import (assert_equal,
assert_false,
assert_true)
CURDIR = os.path.dirname(os.path.realpath(__file__))
def test_eval_rule_based():
actual = eval_rule_based(CURDIR + '/data/rule_based_output.txt',
okform_dir=CURDIR + '/data/docs_okformed/',
accepted_labels=['AL', 'IC'])
expected = np.asarray([[10, 11, 10],
[5, 5, 6]])
assert_array_equal(actual, expected)
def test_eval_stat():
pred_tokens = ["SuperGroup", "sales", "rebound", "over", "Christmas",
"to", "defy", "city", "EXPECTATIONS"]
true_tokens = 'SuperGroup sales rebound over Christmas to defy City EXPECTATIONS'.split()
actual = eval_stat(pred_tokens, true_tokens, accepted_labels=['AL', 'IC'])
expected = np.asarray([[5, 6, 5],
[1, 1, 2]])
assert_array_equal(actual, expected)
def test_is_consistent_prediction():
assert_false(
is_consistent_prediction(['A'], ['A', 'extra token'])
)
assert_false(
is_consistent_prediction(['A', 'B'], ['A', 'different'])
)
assert_true(
is_consistent_prediction(['A', 'B'], ['A', 'B'])
)
| mit | 8,785,436,546,239,153,000 | 33.681818 | 93 | 0.534076 | false | 3.912821 | false | false | false |
rmdfnc/amazonHistory | hist.py | 1 | 1276 | '''
Take Amazon csv and visualize your spending
'''
import pdb
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
#pdb.set_trace()
from datetime import datetime
#Initialize variables
date = []
total = []
rownum = 0
# Import the csv file
with open ('sampleHistory.csv') as csvfile:
amzreader = csv.DictReader(csvfile,delimiter=',')
for row in amzreader:
date.append(datetime.strptime(row['Order Date'], '%m/%d/%y'))
totalstr = row['Item Total']
totalstr = totalstr[1:]
total.append(float(totalstr))
#print(row['Order Date'])
rownum = rownum+1
#### Calculate quantities of interest ###
cumtotal = np.cumsum(total)
#### Visualize the data ####
fig, ax = plt.subplots(2)
#Plot the distribution of purchase sizes
n, bins, patches = ax[0].hist(total, 5, facecolor='green', alpha=0.75)
ax[0].set_ylabel("# of Purchases")
ax[0].set_xlabel("Dollar Amount")
#Plot cumulative total
ax[1].plot(date,cumtotal)
ax[1].set_ylabel("Cumulative Spend ($)")
#Format figure
plt.setp(plt.xticks()[1],rotation=30, ha='right')
plt.tight_layout() # Moves axis edges to prevent axis labels from being clipped
plt.show()
print('test')
| mit | -496,072,831,152,706,400 | 25.040816 | 80 | 0.652038 | false | 3.375661 | false | false | false |
hanguokai/youku | youku/youku_videos.py | 1 | 9104 | """Youku Open API V2 Python Client
doc: http://open.youku.com/docs/tech_doc.html
"""
import requests
from .util import check_error, remove_none_value
class YoukuVideos(object):
"""Youku Videos API.
doc:http://open.youku.com/docs/api_videos.html
"""
def __init__(self, client_id):
super(YoukuVideos, self).__init__()
self.client_id = client_id
def find_video_by_id(self, video_id):
"""doc: http://open.youku.com/docs/doc?id=44
"""
url = 'https://openapi.youku.com/v2/videos/show_basic.json'
params = {
'client_id': self.client_id,
'video_id': video_id
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
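    # Illustrative usage -- not part of the original module; the client id and
    # video id below are placeholders:
    #
    #     videos = YoukuVideos('your-client-id')
    #     info = videos.find_video_by_id('XMzgwMDYxNjQxMg==')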
def find_video_by_url(self, video_url):
"""doc: http://open.youku.com/docs/doc?id=44
"""
url = 'https://openapi.youku.com/v2/videos/show_basic.json'
params = {
'client_id': self.client_id,
'video_url': video_url
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def find_videos_by_ids(self, video_ids):
"""doc: http://open.youku.com/docs/doc?id=45
"""
url = 'https://openapi.youku.com/v2/videos/show_basic_batch.json'
params = {
'client_id': self.client_id,
'video_ids': video_ids
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def find_video_detail_by_id(self, video_id, ext=None):
"""doc: http://cloud.youku.com/docs?id=46
"""
url = 'https://api.youku.com/videos/show.json'
params = {
'client_id': self.client_id,
'video_id': video_id
}
if ext:
params['ext'] = ext
r = requests.get(url, params=params)
check_error(r)
return r.json()
def find_video_details_by_ids(self, video_ids, ext=None):
"""doc: http://open.youku.com/docs/doc?id=47
"""
url = 'https://openapi.youku.com/v2/videos/show_batch.json'
params = {
'client_id': self.client_id,
'video_ids': video_ids
}
if ext:
params['ext'] = ext
r = requests.get(url, params=params)
check_error(r)
return r.json()
def find_videos_by_me(self, access_token, orderby='published',
page=1, count=20):
"""doc: http://cloud.youku.com/docs?id=48
"""
url = 'https://api.youku.com/videos/by_me.json'
params = {
'client_id': self.client_id,
'access_token': access_token,
'orderby': orderby,
'page': page,
'count': count
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def find_videos_by_userid(self, user_id, orderby='published',
page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=49
"""
url = 'https://openapi.youku.com/v2/videos/by_user.json'
params = {
'client_id': self.client_id,
'user_id': user_id,
'orderby': orderby,
'page': page,
'count': count
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def find_videos_by_username(self, user_name, orderby='published',
page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=49
"""
url = 'https://openapi.youku.com/v2/videos/by_user.json'
params = {
'client_id': self.client_id,
'user_name': user_name,
'orderby': orderby,
'page': page,
'count': count
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def update_video(self, access_token, video_id, title=None,
tags=None, category=None, copyright_type=None,
public_type=None, watch_password=None,
description=None, thumbnail_seq=None):
"""doc: http://open.youku.com/docs/doc?id=50
"""
url = 'https://openapi.youku.com/v2/videos/update.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'video_id': video_id,
'title': title,
'tags': tags,
'category': category,
'copyright_type': copyright_type,
'public_type': public_type,
'watch_password': watch_password,
'description': description,
'thumbnail_seq': thumbnail_seq
}
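        # Strip entries whose value is None so only explicitly supplied fields
        # are sent in the update request.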
data = remove_none_value(data)
r = requests.post(url, data=data)
check_error(r)
return r.json()['id']
def destroy_video(self, access_token, video_id):
"""doc: http://open.youku.com/docs/doc?id=51
"""
url = 'https://openapi.youku.com/v2/videos/destroy.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'video_id': video_id
}
r = requests.post(url, data=data)
check_error(r)
return r.json()['id']
def find_videos_by_related(self, video_id, count=20):
"""doc: http://open.youku.com/docs/doc?id=52
"""
url = 'https://openapi.youku.com/v2/videos/by_related.json'
params = {
'client_id': self.client_id,
'video_id': video_id,
'count': count
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def find_favorite_videos_by_me(self, access_token, orderby='favorite-time',
page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=53
"""
url = 'https://openapi.youku.com/v2/videos/favorite/by_me.json'
params = {
'client_id': self.client_id,
'access_token': access_token,
'orderby': orderby,
'page': page,
'count': count
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def find_favorite_videos_by_userid(self, user_id,
orderby='favorite-time',
page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=54
"""
url = 'https://openapi.youku.com/v2/videos/favorite/by_user.json'
params = {
'client_id': self.client_id,
'user_id': user_id,
'orderby': orderby,
'page': page,
'count': count
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def find_favorite_videos_by_username(self, user_name,
orderby='favorite-time',
page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=54
"""
url = 'https://openapi.youku.com/v2/videos/favorite/by_user.json'
params = {
'client_id': self.client_id,
'user_name': user_name,
'orderby': orderby,
'page': page,
'count': count
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def create_favorite_video(self, access_token, video_id):
"""doc: http://open.youku.com/docs/doc?id=55
"""
url = 'https://openapi.youku.com/v2/videos/favorite/create.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'video_id': video_id
}
r = requests.post(url, data=data)
check_error(r)
return r.json()['id']
def destroy_favorite_video(self, access_token, video_id):
"""doc: http://open.youku.com/docs/doc?id=56
"""
url = 'https://openapi.youku.com/v2/videos/favorite/destroy.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'video_id': video_id
}
r = requests.post(url, data=data)
check_error(r)
return r.json()['id']
def find_videos_by_category(self, category, genre=None,
period='today',
orderby='view-count',
page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=57
"""
url = 'https://openapi.youku.com/v2/videos/by_category.json'
params = {
'client_id': self.client_id,
'category': category,
'period': period,
'orderby': orderby,
'page': page,
'count': count
}
if genre:
params['genre'] = genre
r = requests.get(url, params=params)
check_error(r)
return r.json()
| apache-2.0 | -5,377,121,332,355,707,000 | 31.630824 | 79 | 0.49967 | false | 3.536908 | false | false | false |
citrix-openstack-build/ironic | ironic/db/sqlalchemy/migrate_repo/versions/001_init.py | 1 | 3242 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from sqlalchemy import Table, Column, Index, ForeignKey, MetaData
from sqlalchemy import DateTime, Integer, String, Text
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
nodes = Table('nodes', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36)),
Column('power_info', Text),
Column('cpu_arch', String(length=10)),
Column('cpu_num', Integer),
Column('memory', Integer),
Column('local_storage_max', Integer),
Column('task_state', String(length=255)),
Column('image_path', String(length=255), nullable=True),
Column('instance_uuid', String(length=36), nullable=True),
Column('instance_name', String(length=255), nullable=True),
Column('extra', Text),
Column('created_at', DateTime),
Column('updated_at', DateTime),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
ifaces = Table('ifaces', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=18)),
Column('node_id', Integer, ForeignKey('nodes.id'),
nullable=True),
Column('extra', Text),
Column('created_at', DateTime),
Column('updated_at', DateTime),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
tables = [nodes, ifaces]
for table in tables:
try:
table.create()
except Exception:
LOG.info(repr(table))
            LOG.exception(_('Exception while creating table.'))
raise
indexes = [
Index('node_cpu_mem_disk', nodes.c.cpu_num,
nodes.c.memory, nodes.c.local_storage_max),
Index('node_instance_uuid', nodes.c.instance_uuid),
]
uniques = [
UniqueConstraint('uuid', table=nodes,
name='node_uuid_ux'),
UniqueConstraint('address', table=ifaces,
name='iface_address_ux'),
]
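    # Secondary indexes and unique constraints are only created for the
    # MySQL and PostgreSQL engines.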
if migrate_engine.name == 'mysql' or migrate_engine.name == 'postgresql':
for index in indexes:
index.create(migrate_engine)
for index in uniques:
index.create(migrate_engine)
def downgrade(migrate_engine):
raise NotImplementedError('Downgrade from Folsom is unsupported.')
| apache-2.0 | -6,940,069,309,110,839,000 | 32.770833 | 78 | 0.628007 | false | 4.103797 | false | false | false |
UTNkar/moore | src/home/migrations/0040_manual_imageicons_data.py | 1 | 2662 | # Generated by Django 2.2.10 on 2020-04-04 13:36
from django.db import migrations
from itertools import chain
from utils.data_migrations import stream_field_filter_map
def imageicon_to_columns(block):
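    # Convert a legacy image+icons block into a two-column block: the image in
    # one 6-wide column and the heading plus icon group in the other, ordered
    # according to the original image_alignment value.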
image = {
'type': 'image',
'value':{
'image': block['value']['image'],
'height': 400
}
}
heading = {
'type': 'heading',
'value': {
'title': block['value']['title']
}
}
icons = []
for icon in block['value']['icons']:
icons.append({
'title': icon['title'],
'subtitle': icon['description'],
'icon': icon['icon']
})
icon_group = {
'type': 'icons',
'value': {
'icons': icons
}
}
if block['value']['image_alignment'] == "left":
return {
'type': 'columns',
'value': {
'columns':[
{
'width': 6,
'content': [image]
},
{
'width': 6,
'content': [
heading,
icon_group
]
}
]
}
}
else:
return {
'type': 'columns',
'value': {
'columns':[
{
'width': 6,
'content': [
heading,
icon_group
]
},
{
'width': 6,
'content': [image]
}
]
}
}
def apply_to_all_pages(apps, mapper):
HomePage = apps.get_model('home', 'HomePage')
WebPage = apps.get_model('home', 'WebPage')
hps = HomePage.objects.all()
    wps = WebPage.objects.all()
for obj in chain(hps, wps):
# There is a long-standing mistake that image-icons and image-description have swapped tags in the database.
obj.body_en = stream_field_filter_map(obj.body_en, "image_description", mapper)
obj.body_sv = stream_field_filter_map(obj.body_sv, "image_description", mapper)
        obj.save()
def forwards(apps, schema_editor):
apply_to_all_pages(apps, imageicon_to_columns)
class Migration(migrations.Migration):
dependencies = [
('home', '0039_auto_20200404_1529'),
]
operations = [
migrations.RunPython(forwards)
]
| agpl-3.0 | -2,287,619,571,841,572,600 | 25.62 | 116 | 0.411721 | false | 4.511864 | false | false | false |
matthewoliver/swift | test/unit/common/test_storage_policy.py | 2 | 57607 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.storage_policies """
import contextlib
import six
import logging
import unittest
import os
import mock
from functools import partial
from six.moves.configparser import ConfigParser
from tempfile import NamedTemporaryFile
from test.unit import patch_policies, FakeRing, temptree, DEFAULT_TEST_EC_TYPE
import swift.common.storage_policy
from swift.common.storage_policy import (
StoragePolicyCollection, POLICIES, PolicyError, parse_storage_policies,
reload_storage_policies, get_policy_string, split_policy_string,
BaseStoragePolicy, StoragePolicy, ECStoragePolicy, REPL_POLICY, EC_POLICY,
VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE, BindPortsCache)
from swift.common.ring import RingData
from swift.common.exceptions import RingLoadError
from pyeclib.ec_iface import ECDriver
class CapturingHandler(logging.Handler):
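    # Collects emitted log records so tests can assert on what was logged.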
def __init__(self):
super(CapturingHandler, self).__init__()
self._records = []
def emit(self, record):
self._records.append(record)
@contextlib.contextmanager
def capture_logging(log_name):
captured = CapturingHandler()
logger = logging.getLogger(log_name)
logger.addHandler(captured)
try:
yield captured._records
finally:
logger.removeHandler(captured)
@BaseStoragePolicy.register('fake')
class FakeStoragePolicy(BaseStoragePolicy):
"""
Test StoragePolicy class - the only user at the moment is
test_validate_policies_type_invalid()
"""
def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None):
super(FakeStoragePolicy, self).__init__(
idx, name, is_default, is_deprecated, object_ring)
class TestStoragePolicies(unittest.TestCase):
def _conf(self, conf_str):
conf_str = "\n".join(line.strip() for line in conf_str.split("\n"))
if six.PY2:
conf = ConfigParser()
else:
conf = ConfigParser(strict=False)
conf.readfp(six.StringIO(conf_str))
return conf
def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs):
try:
f(*args, **kwargs)
except exc_class as err:
err_msg = str(err)
self.assertTrue(message in err_msg, 'Error message %r did not '
'have expected substring %r' % (err_msg, message))
else:
self.fail('%r did not raise %s' % (message, exc_class.__name__))
def test_policy_baseclass_instantiate(self):
self.assertRaisesWithMessage(TypeError,
"Can't instantiate BaseStoragePolicy",
BaseStoragePolicy, 1, 'one')
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two'),
StoragePolicy(3, 'three', is_deprecated=True),
ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
])
def test_swift_info(self):
# the deprecated 'three' should not exist in expect
expect = [{'aliases': 'zero', 'default': True, 'name': 'zero', },
{'aliases': 'two', 'name': 'two'},
{'aliases': 'one', 'name': 'one'},
{'aliases': 'ten', 'name': 'ten'}]
swift_info = POLICIES.get_policy_info()
self.assertEqual(sorted(expect, key=lambda k: k['name']),
sorted(swift_info, key=lambda k: k['name']))
@patch_policies
def test_get_policy_string(self):
self.assertEqual(get_policy_string('something', 0), 'something')
self.assertEqual(get_policy_string('something', None), 'something')
self.assertEqual(get_policy_string('something', ''), 'something')
self.assertEqual(get_policy_string('something', 1),
'something' + '-1')
self.assertRaises(PolicyError, get_policy_string, 'something', 99)
@patch_policies
def test_split_policy_string(self):
expectations = {
'something': ('something', POLICIES[0]),
'something-1': ('something', POLICIES[1]),
'tmp': ('tmp', POLICIES[0]),
'objects': ('objects', POLICIES[0]),
'tmp-1': ('tmp', POLICIES[1]),
'objects-1': ('objects', POLICIES[1]),
'objects-': PolicyError,
'objects-0': PolicyError,
'objects--1': ('objects-', POLICIES[1]),
'objects-+1': PolicyError,
'objects--': PolicyError,
'objects-foo': PolicyError,
'objects--bar': PolicyError,
'objects-+bar': PolicyError,
# questionable, demonstrated as inverse of get_policy_string
'objects+0': ('objects+0', POLICIES[0]),
'': ('', POLICIES[0]),
'0': ('0', POLICIES[0]),
'-1': ('', POLICIES[1]),
}
for policy_string, expected in expectations.items():
if expected == PolicyError:
try:
invalid = split_policy_string(policy_string)
except PolicyError:
continue # good
else:
self.fail('The string %r returned %r '
'instead of raising a PolicyError' %
(policy_string, invalid))
self.assertEqual(expected, split_policy_string(policy_string))
# should be inverse of get_policy_string
self.assertEqual(policy_string, get_policy_string(*expected))
def test_defaults(self):
self.assertGreater(len(POLICIES), 0)
# test class functions
default_policy = POLICIES.default
self.assertTrue(default_policy.is_default)
zero_policy = POLICIES.get_by_index(0)
self.assertTrue(zero_policy.idx == 0)
zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
self.assertTrue(zero_policy_by_name.idx == 0)
def test_storage_policy_repr(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False),
ECStoragePolicy(10, 'ten',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
ECStoragePolicy(11, 'eleven',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3,
ec_duplication_factor=2)]
policies = StoragePolicyCollection(test_policies)
for policy in policies:
policy_repr = repr(policy)
self.assertTrue(policy.__class__.__name__ in policy_repr)
self.assertTrue('is_default=%s' % policy.is_default in policy_repr)
self.assertTrue('is_deprecated=%s' % policy.is_deprecated in
policy_repr)
self.assertTrue(policy.name in policy_repr)
if policy.policy_type == EC_POLICY:
self.assertTrue('ec_type=%s' % policy.ec_type in policy_repr)
self.assertTrue('ec_ndata=%s' % policy.ec_ndata in policy_repr)
self.assertTrue('ec_nparity=%s' %
policy.ec_nparity in policy_repr)
self.assertTrue('ec_segment_size=%s' %
policy.ec_segment_size in policy_repr)
if policy.ec_duplication_factor > 1:
self.assertTrue('ec_duplication_factor=%s' %
policy.ec_duplication_factor in
policy_repr)
collection_repr = repr(policies)
collection_repr_lines = collection_repr.splitlines()
self.assertTrue(
policies.__class__.__name__ in collection_repr_lines[0])
self.assertEqual(len(policies), len(collection_repr_lines[1:-1]))
for policy, line in zip(policies, collection_repr_lines[1:-1]):
self.assertTrue(repr(policy) in line)
with patch_policies(policies):
self.assertEqual(repr(POLICIES), collection_repr)
def test_validate_policies_defaults(self):
# 0 explicit default
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[0])
self.assertEqual(policies.default.name, 'zero')
# non-zero explicit default
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', True)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[2])
self.assertEqual(policies.default.name, 'two')
# multiple defaults
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True),
StoragePolicy(2, 'two', True)]
self.assertRaisesWithMessage(
PolicyError, 'Duplicate default', StoragePolicyCollection,
test_policies)
# nothing specified
test_policies = []
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, policies[0])
self.assertEqual(policies.default.name, 'Policy-0')
# no default specified with only policy index 0
test_policies = [StoragePolicy(0, 'zero')]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, policies[0])
# no default specified with multiple policies
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
self.assertRaisesWithMessage(
PolicyError, 'Unable to find default policy',
StoragePolicyCollection, test_policies)
def test_deprecate_policies(self):
# deprecation specified
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False, is_deprecated=True)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[0])
self.assertEqual(policies.default.name, 'zero')
self.assertEqual(len(policies), 3)
# multiple policies requires default
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False, is_deprecated=True),
StoragePolicy(2, 'two', False)]
self.assertRaisesWithMessage(
PolicyError, 'Unable to find default policy',
StoragePolicyCollection, test_policies)
def test_validate_policies_indexes(self):
# duplicate indexes
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(1, 'two', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
def test_validate_policy_params(self):
StoragePolicy(0, 'name') # sanity
# bogus indexes
self.assertRaises(PolicyError, FakeStoragePolicy, 'x', 'name')
self.assertRaises(PolicyError, FakeStoragePolicy, -1, 'name')
# non-zero Policy-0
self.assertRaisesWithMessage(PolicyError, 'reserved',
FakeStoragePolicy, 1, 'policy-0')
# deprecate default
self.assertRaisesWithMessage(
PolicyError, 'Deprecated policy can not be default',
FakeStoragePolicy, 1, 'Policy-1', is_default=True,
is_deprecated=True)
# weird names
names = (
'',
'name_foo',
'name\nfoo',
'name foo',
u'name \u062a',
'name \xd8\xaa',
)
for name in names:
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
FakeStoragePolicy, 1, name)
def test_validate_policies_names(self):
# duplicate names
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'zero', False),
StoragePolicy(2, 'two', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
def test_validate_policies_type_default(self):
# no type specified - make sure the policy is initialized to
# DEFAULT_POLICY_TYPE
test_policy = FakeStoragePolicy(0, 'zero', True)
self.assertEqual(test_policy.policy_type, 'fake')
def test_validate_policies_type_invalid(self):
class BogusStoragePolicy(FakeStoragePolicy):
policy_type = 'bogus'
# unsupported policy type - initialization with FakeStoragePolicy
self.assertRaisesWithMessage(PolicyError, 'Invalid type',
BogusStoragePolicy, 1, 'one')
def test_policies_type_attribute(self):
test_policies = [
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two'),
StoragePolicy(3, 'three', is_deprecated=True),
ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.get_by_index(0).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(1).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(2).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(3).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(10).policy_type,
EC_POLICY)
def test_names_are_normalized(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'ZERO', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
policies = StoragePolicyCollection([StoragePolicy(0, 'zEro', True),
StoragePolicy(1, 'One', False)])
pol0 = policies[0]
pol1 = policies[1]
for name in ('zero', 'ZERO', 'zErO', 'ZeRo'):
self.assertEqual(pol0, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'zEro')
for name in ('one', 'ONE', 'oNe', 'OnE'):
self.assertEqual(pol1, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'One')
def test_multiple_names(self):
# checking duplicate on insert
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False, aliases='zero')]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
        # checking correct retrieval using other names
test_policies = [StoragePolicy(0, 'zero', True, aliases='cero, kore'),
StoragePolicy(1, 'one', False, aliases='uno, tahi'),
StoragePolicy(2, 'two', False, aliases='dos, rua')]
policies = StoragePolicyCollection(test_policies)
for name in ('zero', 'cero', 'kore'):
self.assertEqual(policies.get_by_name(name), test_policies[0])
for name in ('two', 'dos', 'rua'):
self.assertEqual(policies.get_by_name(name), test_policies[2])
# Testing parsing of conf files/text
good_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, tahi
default = yes
""")
policies = parse_storage_policies(good_conf)
self.assertEqual(policies.get_by_name('one'),
policies[0])
self.assertEqual(policies.get_by_name('one'),
policies.get_by_name('tahi'))
name_repeat_conf = self._conf("""
[storage-policy:0]
name = one
aliases = one
default = yes
""")
# Test on line below should not generate errors. Repeat of main
# name under aliases is permitted during construction
# but only because automated testing requires it.
policies = parse_storage_policies(name_repeat_conf)
extra_commas_conf = self._conf("""
[storage-policy:0]
name = one
aliases = ,,one, ,
default = yes
""")
# Extra blank entries should be silently dropped
policies = parse_storage_policies(extra_commas_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, uno
default = yes
""")
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_conf)
def test_multiple_names_EC(self):
# checking duplicate names on insert
test_policies_ec = [
ECStoragePolicy(
0, 'ec8-2',
aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=8),
is_default=True),
ECStoragePolicy(
1, 'ec10-4',
aliases='ec8-2',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=10))]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies_ec)
        # checking correct retrieval using other names
good_test_policies_EC = [
ECStoragePolicy(0, 'ec8-2', aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=10),
is_default=True),
ECStoragePolicy(1, 'ec10-4', aliases='athena, minerva',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=14)),
ECStoragePolicy(2, 'ec4-2', aliases='poseidon, neptune',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
object_ring=FakeRing(replicas=6)),
ECStoragePolicy(3, 'ec4-2-dup', aliases='uzuki, rin',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
ec_duplication_factor=2,
object_ring=FakeRing(replicas=12)),
]
ec_policies = StoragePolicyCollection(good_test_policies_EC)
for name in ('ec8-2', 'zeus', 'jupiter'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[0])
for name in ('ec10-4', 'athena', 'minerva'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[1])
for name in ('ec4-2', 'poseidon', 'neptune'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[2])
for name in ('ec4-2-dup', 'uzuki', 'rin'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[3])
# Testing parsing of conf files/text
good_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, jupiter
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
[storage-policy:1]
name = ec10-4
aliases = poseidon, neptune
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
[storage-policy:2]
name = ec4-2-dup
aliases = uzuki, rin
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 4
ec_num_parity_fragments = 2
ec_duplication_factor = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
ec_policies = parse_storage_policies(good_ec_conf)
self.assertEqual(ec_policies.get_by_name('ec8-2'),
ec_policies[0])
self.assertEqual(ec_policies.get_by_name('ec10-4'),
ec_policies.get_by_name('poseidon'))
self.assertEqual(ec_policies.get_by_name('ec4-2-dup'),
ec_policies.get_by_name('uzuki'))
name_repeat_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = ec8-2
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
# Test on line below should not generate errors. Repeat of main
# name under aliases is permitted during construction
# but only because automated testing requires it.
ec_policies = parse_storage_policies(name_repeat_ec_conf)
bad_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, zeus
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_ec_conf)
def test_add_remove_names(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
policies = StoragePolicyCollection(test_policies)
# add names
policies.add_policy_alias(1, 'tahi')
self.assertEqual(policies.get_by_name('tahi'), test_policies[1])
policies.add_policy_alias(2, 'rua', 'dos')
self.assertEqual(policies.get_by_name('rua'), test_policies[2])
self.assertEqual(policies.get_by_name('dos'), test_policies[2])
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.add_policy_alias, 2, 'double\n')
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.add_policy_alias, 2, '')
# try to add existing name
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 2, 'two')
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 1, 'two')
# remove name
policies.remove_policy_alias('tahi')
self.assertIsNone(policies.get_by_name('tahi'))
# remove only name
self.assertRaisesWithMessage(PolicyError,
'Policies must have at least one name.',
policies.remove_policy_alias, 'zero')
# remove non-existent name
self.assertRaisesWithMessage(PolicyError,
'No policy with name',
policies.remove_policy_alias, 'three')
# remove default name
policies.remove_policy_alias('two')
self.assertIsNone(policies.get_by_name('two'))
self.assertEqual(policies.get_by_index(2).name, 'rua')
# change default name to a new name
policies.change_policy_primary_name(2, 'two')
self.assertEqual(policies.get_by_name('two'), test_policies[2])
self.assertEqual(policies.get_by_index(2).name, 'two')
# change default name to an existing alias
policies.change_policy_primary_name(2, 'dos')
self.assertEqual(policies.get_by_index(2).name, 'dos')
# change default name to a bad new name
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.change_policy_primary_name,
2, 'bad\nname')
# change default name to a name belonging to another policy
self.assertRaisesWithMessage(PolicyError,
'Other policy',
policies.change_policy_primary_name,
1, 'dos')
def test_deprecated_default(self):
bad_conf = self._conf("""
[storage-policy:1]
name = one
deprecated = yes
default = yes
""")
self.assertRaisesWithMessage(
PolicyError, "Deprecated policy can not be default",
parse_storage_policies, bad_conf)
def test_multiple_policies_with_no_policy_index_zero(self):
bad_conf = self._conf("""
[storage-policy:1]
name = one
default = yes
""")
# Policy-0 will not be implicitly added if other policies are defined
self.assertRaisesWithMessage(
PolicyError, "must specify a storage policy section "
"for policy index 0", parse_storage_policies, bad_conf)
@mock.patch.object(swift.common.storage_policy, 'VALID_EC_TYPES',
['isa_l_rs_vand', 'isa_l_rs_cauchy'])
@mock.patch('swift.common.storage_policy.ECDriver')
def test_known_bad_ec_config(self, mock_driver):
good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(good_conf)
mock_driver.assert_called_once()
mock_driver.reset_mock()
self.assertFalse([(r.levelname, r.msg) for r in records])
good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(good_conf)
mock_driver.assert_called_once()
mock_driver.reset_mock()
self.assertFalse([(r.levelname, r.msg) for r in records])
bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
with capture_logging('swift.common.storage_policy') as records, \
self.assertRaises(PolicyError) as exc_mgr:
parse_storage_policies(bad_conf)
self.assertEqual(exc_mgr.exception.args[0],
'Storage policy bad-policy uses an EC '
'configuration known to harm data durability. This '
'policy MUST be deprecated.')
mock_driver.assert_not_called()
mock_driver.reset_mock()
self.assertEqual([r.levelname for r in records],
['WARNING'])
for msg in ('known to harm data durability',
'Any data in this policy should be migrated',
'https://bugs.launchpad.net/swift/+bug/1639691'):
self.assertIn(msg, records[0].msg)
slightly_less_bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
deprecated = true
[storage-policy:1]
name = good-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
default = true
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(slightly_less_bad_conf)
self.assertEqual(2, mock_driver.call_count)
mock_driver.reset_mock()
self.assertEqual([r.levelname for r in records],
['WARNING'])
for msg in ('known to harm data durability',
'Any data in this policy should be migrated',
'https://bugs.launchpad.net/swift/+bug/1639691'):
self.assertIn(msg, records[0].msg)
def test_no_default(self):
orig_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
default = yes
""")
policies = parse_storage_policies(orig_conf)
self.assertEqual(policies.default, policies[1])
        self.assertEqual(policies[0].name, 'zero')
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
deprecated = yes
""")
# multiple polices and no explicit default
self.assertRaisesWithMessage(
PolicyError, "Unable to find default",
parse_storage_policies, bad_conf)
good_conf = self._conf("""
[storage-policy:0]
name = Policy-0
default = yes
[storage-policy:1]
name = one
deprecated = yes
""")
policies = parse_storage_policies(good_conf)
self.assertEqual(policies.default, policies[0])
        self.assertTrue(policies[1].is_deprecated)
def test_parse_storage_policies(self):
# ValueError when deprecating policy 0
bad_conf = self._conf("""
[storage-policy:0]
name = zero
deprecated = yes
[storage-policy:1]
name = one
deprecated = yes
""")
self.assertRaisesWithMessage(
PolicyError, "Unable to find policy that's not deprecated",
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:-1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x-1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x:1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:1]
name = zero
boo = berries
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid option',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name =
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:3]
name = Policy-0
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:1]
name = policY-0
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = one
[storage-policy:1]
name = ONE
""")
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = good_stuff
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
# policy_type = erasure_coding
# missing ec_type, ec_num_data_fragments and ec_num_parity_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
""")
self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
parse_storage_policies, bad_conf)
# missing ec_type, but other options valid...
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
parse_storage_policies, bad_conf)
# ec_type specified, but invalid...
bad_conf = self._conf("""
[storage-policy:0]
name = zero
default = yes
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = garbage_alg
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
self.assertRaisesWithMessage(PolicyError,
'Wrong ec_type garbage_alg for policy '
'ec10-4, should be one of "%s"' %
(', '.join(VALID_EC_TYPES)),
parse_storage_policies, bad_conf)
# missing and invalid ec_num_parity_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_parity_fragments',
parse_storage_policies, bad_conf)
for num_parity in ('-4', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = %(num_parity)s
""" % {'ec_type': DEFAULT_TEST_EC_TYPE,
'num_parity': num_parity})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_parity_fragments',
parse_storage_policies, bad_conf)
# missing and invalid ec_num_data_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_parity_fragments = 4
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_data_fragments',
parse_storage_policies, bad_conf)
for num_data in ('-10', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = %(num_data)s
ec_num_parity_fragments = 4
""" % {'num_data': num_data, 'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_data_fragments',
parse_storage_policies, bad_conf)
# invalid ec_object_segment_size
for segment_size in ('-4', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_object_segment_size = %(segment_size)s
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""" % {'segment_size': segment_size,
'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_object_segment_size',
parse_storage_policies, bad_conf)
# Additional section added to ensure parser ignores other sections
conf = self._conf("""
[some-other-section]
foo = bar
[storage-policy:0]
name = zero
[storage-policy:5]
name = one
default = yes
[storage-policy:6]
name = duplicate-sections-are-ignored
[storage-policy:6]
name = apple
""")
policies = parse_storage_policies(conf)
self.assertEqual(True, policies.get_by_index(5).is_default)
self.assertEqual(False, policies.get_by_index(0).is_default)
self.assertEqual(False, policies.get_by_index(6).is_default)
self.assertEqual("object", policies.get_by_name("zero").ring_name)
self.assertEqual("object-5", policies.get_by_name("one").ring_name)
self.assertEqual("object-6", policies.get_by_name("apple").ring_name)
self.assertEqual(0, int(policies.get_by_name('zero')))
self.assertEqual(5, int(policies.get_by_name('one')))
self.assertEqual(6, int(policies.get_by_name('apple')))
self.assertEqual("zero", policies.get_by_index(0).name)
self.assertEqual("zero", policies.get_by_index("0").name)
self.assertEqual("one", policies.get_by_index(5).name)
self.assertEqual("apple", policies.get_by_index(6).name)
self.assertEqual("zero", policies.get_by_index(None).name)
self.assertEqual("zero", policies.get_by_index('').name)
self.assertEqual(policies.get_by_index(0), policies.legacy)
def test_reload_invalid_storage_policies(self):
conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:00]
name = double-zero
""")
with NamedTemporaryFile(mode='w+t') as f:
conf.write(f)
f.flush()
with mock.patch('swift.common.utils.SWIFT_CONF_FILE',
new=f.name):
try:
reload_storage_policies()
except SystemExit as e:
err_msg = str(e)
else:
self.fail('SystemExit not raised')
parts = [
'Invalid Storage Policy Configuration',
'Duplicate index',
]
for expected in parts:
self.assertTrue(
expected in err_msg, '%s was not in %s' % (expected,
err_msg))
def test_storage_policy_ordering(self):
test_policies = StoragePolicyCollection([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(503, 'error'),
StoragePolicy(204, 'empty'),
StoragePolicy(404, 'missing'),
])
self.assertEqual([0, 204, 404, 503], [int(p) for p in
sorted(list(test_policies))])
p503 = test_policies[503]
self.assertTrue(501 < p503 < 507)
def test_get_object_ring(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
policies = StoragePolicyCollection(test_policies)
class NamedFakeRing(FakeRing):
def __init__(self, swift_dir, ring_name=None):
self.ring_name = ring_name
super(NamedFakeRing, self).__init__()
with mock.patch('swift.common.storage_policy.Ring',
new=NamedFakeRing):
for policy in policies:
self.assertFalse(policy.object_ring)
ring = policies.get_object_ring(int(policy), '/path/not/used')
self.assertEqual(ring.ring_name, policy.ring_name)
self.assertTrue(policy.object_ring)
self.assertTrue(isinstance(policy.object_ring, NamedFakeRing))
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
with mock.patch('swift.common.storage_policy.Ring', new=blow_up):
for policy in policies:
policy.load_ring('/path/not/used')
expected = policies.get_object_ring(int(policy),
'/path/not/used')
self.assertEqual(policy.object_ring, expected)
# bad policy index
self.assertRaises(PolicyError, policies.get_object_ring, 99,
'/path/not/used')
def test_bind_ports_cache(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
my_ips = ['1.2.3.4', '2.3.4.5']
other_ips = ['3.4.5.6', '4.5.6.7']
bind_ip = my_ips[1]
devs_by_ring_name1 = {
'object': [ # 'aay'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6006},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6007},
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6008},
None,
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6009}],
'object-1': [ # 'bee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6006}, # dupe
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6010},
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6011},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6012}],
'object-2': [ # 'cee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6010}, # on our IP and a not-us IP
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6013},
None,
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6014},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6015}],
}
devs_by_ring_name2 = {
'object': [ # 'aay'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6016},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6019}],
'object-1': [ # 'bee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6016}, # dupe
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6022}],
'object-2': [ # 'cee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6020},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6025}],
}
ring_files = [ring_name + '.ring.gz'
for ring_name in sorted(devs_by_ring_name1)]
def _fake_load(gz_path, stub_objs, metadata_only=False):
return RingData(
devs=stub_objs[os.path.basename(gz_path)[:-8]],
replica2part2dev_id=[],
part_shift=24)
with mock.patch(
'swift.common.storage_policy.RingData.load'
) as mock_ld, \
patch_policies(test_policies), \
mock.patch('swift.common.storage_policy.whataremyips') \
as mock_whataremyips, \
temptree(ring_files) as tempdir:
mock_whataremyips.return_value = my_ips
cache = BindPortsCache(tempdir, bind_ip)
self.assertEqual([
mock.call(bind_ip),
], mock_whataremyips.mock_calls)
mock_whataremyips.reset_mock()
mock_ld.side_effect = partial(_fake_load,
stub_objs=devs_by_ring_name1)
self.assertEqual(set([
6006, 6008, 6011, 6010, 6014,
]), cache.all_bind_ports_for_node())
self.assertEqual([
mock.call(os.path.join(tempdir, ring_files[0]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[1]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[2]),
metadata_only=True),
], mock_ld.mock_calls)
mock_ld.reset_mock()
mock_ld.side_effect = partial(_fake_load,
stub_objs=devs_by_ring_name2)
self.assertEqual(set([
6006, 6008, 6011, 6010, 6014,
]), cache.all_bind_ports_for_node())
self.assertEqual([], mock_ld.mock_calls)
# but when all the file mtimes are made different, it'll
# reload
for gz_file in [os.path.join(tempdir, n)
for n in ring_files]:
os.utime(gz_file, (88, 88))
self.assertEqual(set([
6016, 6020,
]), cache.all_bind_ports_for_node())
self.assertEqual([
mock.call(os.path.join(tempdir, ring_files[0]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[1]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[2]),
metadata_only=True),
], mock_ld.mock_calls)
mock_ld.reset_mock()
# Don't do something stupid like crash if a ring file is missing.
os.unlink(os.path.join(tempdir, 'object-2.ring.gz'))
self.assertEqual(set([
6016, 6020,
]), cache.all_bind_ports_for_node())
self.assertEqual([], mock_ld.mock_calls)
# whataremyips() is only called in the constructor
self.assertEqual([], mock_whataremyips.mock_calls)
def test_singleton_passthrough(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
with patch_policies(test_policies):
for policy in POLICIES:
self.assertEqual(POLICIES[int(policy)], policy)
def test_quorum_size_replication(self):
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
for n, expected in expected_sizes.items():
policy = StoragePolicy(0, 'zero',
object_ring=FakeRing(replicas=n))
self.assertEqual(policy.quorum, expected)
def test_quorum_size_erasure_coding(self):
test_ec_policies = [
ECStoragePolicy(10, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2),
ECStoragePolicy(11, 'df10-6', ec_type='flat_xor_hd_4',
ec_ndata=10, ec_nparity=6),
ECStoragePolicy(12, 'ec4-2-dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2, ec_duplication_factor=2),
]
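        # Expected EC quorum: data fragments plus the minimum parity fragments
        # needed, multiplied by the duplication factor.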
for ec_policy in test_ec_policies:
k = ec_policy.ec_ndata
expected_size = (
(k + ec_policy.pyeclib_driver.min_parity_fragments_needed())
* ec_policy.ec_duplication_factor
)
self.assertEqual(expected_size, ec_policy.quorum)
def test_validate_ring(self):
test_policies = [
ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
is_default=True),
ECStoragePolicy(1, 'ec10-4', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
ECStoragePolicy(2, 'ec4-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2),
ECStoragePolicy(3, 'ec4-2-2dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
ec_duplication_factor=2)
]
policies = StoragePolicyCollection(test_policies)
class MockRingData(object):
def __init__(self, num_replica):
self.replica_count = num_replica
def do_test(actual_load_ring_replicas):
for policy, ring_replicas in zip(policies,
actual_load_ring_replicas):
with mock.patch('swift.common.ring.ring.RingData.load',
return_value=MockRingData(ring_replicas)):
necessary_replica_num = (policy.ec_n_unique_fragments *
policy.ec_duplication_factor)
with mock.patch(
'swift.common.ring.ring.validate_configuration'):
msg = 'EC ring for policy %s needs to be configured ' \
'with exactly %d replicas.' % \
(policy.name, necessary_replica_num)
self.assertRaisesWithMessage(RingLoadError, msg,
policy.load_ring, 'mock')
        # first, do something completely different
do_test([8, 10, 7, 11])
# then again, closer to true, but fractional
do_test([9.9, 14.1, 5.99999, 12.000000001])
def test_storage_policy_get_info(self):
test_policies = [
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one', is_deprecated=True,
aliases='tahi, uno'),
ECStoragePolicy(10, 'ten',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
ECStoragePolicy(11, 'done', is_deprecated=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
]
policies = StoragePolicyCollection(test_policies)
expected = {
# default replication
(0, True): {
'name': 'zero',
'aliases': 'zero',
'default': True,
'deprecated': False,
'policy_type': REPL_POLICY
},
(0, False): {
'name': 'zero',
'aliases': 'zero',
'default': True,
},
# deprecated replication
(1, True): {
'name': 'one',
'aliases': 'one, tahi, uno',
'default': False,
'deprecated': True,
'policy_type': REPL_POLICY
},
(1, False): {
'name': 'one',
'aliases': 'one, tahi, uno',
'deprecated': True,
},
# enabled ec
(10, True): {
'name': 'ten',
'aliases': 'ten',
'default': False,
'deprecated': False,
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 1,
},
(10, False): {
'name': 'ten',
'aliases': 'ten',
},
# deprecated ec
(11, True): {
'name': 'done',
'aliases': 'done',
'default': False,
'deprecated': True,
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 1,
},
(11, False): {
'name': 'done',
'aliases': 'done',
'deprecated': True,
},
# enabled ec with ec_duplication
(12, True): {
'name': 'twelve',
'aliases': 'twelve',
'default': False,
'deprecated': False,
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 2,
},
(12, False): {
'name': 'twelve',
'aliases': 'twelve',
},
}
self.maxDiff = None
for policy in policies:
expected_info = expected[(int(policy), True)]
self.assertEqual(policy.get_info(config=True), expected_info)
expected_info = expected[(int(policy), False)]
self.assertEqual(policy.get_info(config=False), expected_info)
def test_ec_fragment_size_cached(self):
policy = ECStoragePolicy(
0, 'ec2-1', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, object_ring=FakeRing(replicas=3),
ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, is_default=True)
ec_driver = ECDriver(ec_type=DEFAULT_TEST_EC_TYPE,
k=2, m=1)
expected_fragment_size = ec_driver.get_segment_info(
DEFAULT_EC_OBJECT_SEGMENT_SIZE,
DEFAULT_EC_OBJECT_SEGMENT_SIZE)['fragment_size']
with mock.patch.object(
policy.pyeclib_driver, 'get_segment_info') as fake:
fake.return_value = {
'fragment_size': expected_fragment_size}
for x in range(10):
self.assertEqual(expected_fragment_size,
policy.fragment_size)
# pyeclib_driver.get_segment_info is called only once
self.assertEqual(1, fake.call_count)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,921,339,471,042,839,000 | 38.565247 | 79 | 0.522315 | false | 4.123327 | true | false | false |
bkochendorfer/reviewboard | reviewboard/webapi/tests/test_review_reply.py | 3 | 12139 | from __future__ import unicode_literals
from django.core import mail
from django.utils import six
from reviewboard.reviews.models import Review
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (review_reply_item_mimetype,
review_reply_list_mimetype)
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_review import (ReviewItemMixin,
ReviewListMixin)
from reviewboard.webapi.tests.urls import (get_review_reply_item_url,
get_review_reply_list_url)
class BaseResourceTestCase(BaseWebAPITestCase):
def _create_test_review(self, with_local_site=False):
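        # Build a published review request with a file attachment and one
        # published review containing a file attachment comment.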
review_request = self.create_review_request(
submitter=self.user,
with_local_site=with_local_site)
file_attachment = self.create_file_attachment(review_request)
review_request.publish(review_request.submitter)
review = self.create_review(review_request, publish=True)
self.create_file_attachment_comment(review, file_attachment)
return review
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(ReviewListMixin, ReviewRequestChildListMixin,
BaseResourceTestCase):
"""Testing the ReviewReplyResource list APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/<id>/replies/'
resource = resources.review_reply
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, publish=True)
return (get_review_reply_list_url(review),
review_reply_list_mimetype)
def compare_item(self, item_rsp, reply):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], reply.body_top)
self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)
if reply.body_top_rich_text:
self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_top_text_type'], 'plain')
if reply.body_bottom_rich_text:
self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, publish=True)
if populate_items:
items = [self.create_reply(review, publish=True)]
else:
items = []
return (get_review_reply_list_url(review, local_site_name),
review_reply_list_mimetype,
items)
def test_get_with_counts_only(self):
"""Testing the
GET review-requests/<id>/reviews/<id>/replies/?counts-only=1 API
"""
review = self._create_test_review()
self.create_reply(review, user=self.user, publish=True)
rsp = self.api_get(
'%s?counts-only=1' % get_review_reply_list_url(review),
expected_mimetype=review_reply_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['count'], 1)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, publish=True)
return (get_review_reply_list_url(review, local_site_name),
review_reply_item_mimetype,
{},
[review])
def check_post_result(self, user, rsp, review):
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertFalse(reply.body_top_rich_text)
self.compare_item(rsp['reply'], reply)
def test_post_with_body_top(self):
"""Testing the POST review-requests/<id>/reviews/<id>/replies/ API
with body_top
"""
body_top = 'My Body Top'
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
rsp = self.api_post(
get_review_reply_list_url(review),
{'body_top': body_top},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.body_top, body_top)
def test_post_with_body_bottom(self):
"""Testing the POST review-requests/<id>/reviews/<id>/replies/ API
with body_bottom
"""
body_bottom = 'My Body Bottom'
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
rsp = self.api_post(
get_review_reply_list_url(review),
{'body_bottom': body_bottom},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.body_bottom, body_bottom)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(ReviewItemMixin, ReviewRequestChildItemMixin,
BaseResourceTestCase):
"""Testing the ReviewReplyResource item APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/<id>/replies/<id>/'
resource = resources.review_reply
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
return (get_review_reply_item_url(review, reply.pk),
review_reply_item_mimetype)
def compare_item(self, item_rsp, reply):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], reply.body_top)
self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)
if reply.body_top_rich_text:
self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_top_text_type'], 'plain')
if reply.body_bottom_rich_text:
self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
[reply, review])
def check_delete_result(self, user, reply, review):
self.assertNotIn(reply, review.replies.all())
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
review_reply_item_mimetype,
reply)
def test_get_not_modified(self):
"""Testing the GET review-requests/<id>/reviews/<id>/
with Not Modified response
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
self._testHttpCaching(
get_review_reply_item_url(reply.base_reply_to, reply.id),
check_etags=True)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
review_reply_item_mimetype,
{
'body_top': 'New body top',
},
reply,
[])
def check_put_result(self, user, item_rsp, reply, *args):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], 'New body top')
self.assertEqual(item_rsp['body_top_text_type'], 'plain')
reply = Review.objects.get(pk=reply.pk)
self.compare_item(item_rsp, reply)
def test_put_with_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/replies/<id>/?public=1 API
"""
self.siteconfig.set('mail_send_review_mail', True)
self.siteconfig.save()
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
mail.outbox = []
rsp, response = self.api_post_with_response(
get_review_reply_list_url(review),
expected_mimetype=review_reply_item_mimetype)
self.assertIn('Location', response)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
rsp = self.api_put(
response['Location'],
{
'body_top': 'Test',
'public': True,
},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.public, True)
self.assertEqual(len(mail.outbox), 1)
def test_put_with_publish_and_trivial(self):
"""Testing the PUT review-requests/<id>/draft/ API with trivial
changes
"""
self.siteconfig.set('mail_send_review_mail', True)
self.siteconfig.save()
review_request = self.create_review_request(submitter=self.user,
publish=True)
review = self.create_review(review_request, publish=True)
mail.outbox = []
rsp, response = self.api_post_with_response(
get_review_reply_list_url(review),
expected_mimetype=review_reply_item_mimetype)
self.assertIn('Location', response)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
rsp = self.api_put(
response['Location'],
{
'body_top': 'Test',
'public': True,
'trivial': True
},
expected_mimetype=review_reply_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('reply', rsp)
self.assertIn('id', rsp['reply'])
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertTrue(reply.public)
self.assertEqual(len(mail.outbox), 0)
| mit | -2,576,674,679,771,962,000 | 34.914201 | 78 | 0.598484 | false | 3.914544 | true | false | false |
OCA/connector-telephony | connector_voicent/models/backend_voicent_call_line.py | 1 | 4618 | # Copyright (C) 2019 Open Source Integrators
# <https://www.opensourceintegrators.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
VOICENT_CONTACT_COLUMNS = [('Assigned To', 'Assigned To'),
('Business', 'Business'),
('Category', 'Category'),
('Contact Status', 'Contact Status'),
('Email', 'Email'),
('First Name', 'First Name (Required)'),
('Last Name', 'Last Name'),
('Lead Source', 'Lead Source'),
('Other', 'Other'),
('Phone', 'Phone (Required)')]
VOICENT_REPLY = [('availableagents', 'Available Agents'),
('callback', 'Callback'),
('campid', 'Campaign ID'),
('campname', 'Campaign Name'),
('campsize', 'Campaign Size'),
('connected', 'Connected'),
('dnc', 'Contact DNC'),
('nophone', 'Contact No Phone'),
('disc', 'Disc. Number'),
('dropped', 'Dropped'),
('failed', 'Failed'),
('fax', 'Fax'),
('info', 'Info'),
('in', 'Interested'),
('lines', 'Lines'),
('linebusy', 'Line Busy'),
('live', 'Live Answer'),
('machine', 'Machine Answer'),
('made', 'Made'),
('maxlines', 'Max Lines'),
('noact', 'No Activity'),
('noanswer', 'No Answer'),
('notin', 'Not Interested'),
('notes', 'Notes'),
('optout', 'Opt Out'),
('serverr', 'Service Error'),
('status', 'Status'),
('totalagents', 'Total Agents'),
('wit', 'Wit')]
MSGTYPE = [('audio', 'Audio'),
('ivr', 'IVR'),
('survey', 'Survey'),
('template', 'Template'),
('tts', 'Text-To-Speech')]
class BackendVoicentCallLine(models.Model):
_name = 'backend.voicent.call.line'
_description = 'Voicent Backend Call Line'
name = fields.Char(string='Name', required=True)
sequence = fields.Integer(string='Sequence', default=0)
applies_on = fields.Selection(string='Applies on', selection=[])
msgtype = fields.Selection(MSGTYPE, string='Message Type', required=True)
msginfo = fields.Char(string='Message Info')
backend_id = fields.Many2one(
string='Backend',
comodel_name='backend.voicent',
ondelete='set null')
reply_ids = fields.One2many('backend.voicent.call.line.reply', 'line_id',
string="Replies")
contact_ids = fields.One2many('backend.voicent.call.line.contact',
'line_id',
string="Contact Info")
class BackendVoicentCallLineContact(models.Model):
_name = 'backend.voicent.call.line.contact'
_description = 'Columns of the CSV file to provide the contact list'
_order = 'sequence'
name = fields.Selection(VOICENT_CONTACT_COLUMNS, string='Voicent Field',
required=True)
other = fields.Char(string='Other')
sequence = fields.Integer(string='Sequence', default=0)
field_domain = fields.Char(string='Odoo Field',
required=True)
default_value = fields.Char(string='Default Value', required=True)
line_id = fields.Many2one(
string='Call Line',
comodel_name='backend.voicent.call.line',
ondelete='set null')
class BackendVoicentCallLineReply(models.Model):
_name = 'backend.voicent.call.line.reply'
_description = 'Reply to a Voicent Call'
name = fields.Char(string='Name', required=True)
line_id = fields.Many2one(
string='Call Line',
comodel_name='backend.voicent.call.line',
ondelete='set null')
reply_field = fields.Selection(VOICENT_REPLY, string="Voicent Reply Field",
required=True)
reply_value = fields.Char(string="Voicent Reply Value", required=True)
action_id = fields.Many2one('ir.actions.server', string="Server Action",
required=True,
help="""If the Voicent reply field is equal to
the Voicent reply value, the server action is
executed.""")
| agpl-3.0 | 9,036,391,513,208,413,000 | 41.366972 | 79 | 0.508012 | false | 4.287837 | false | false | false |
ingvagabund/gofed | cmd/goapidiff/goapidiff.py | 2 | 5372 | # Check difference of APIs of two commits
# Output is number of symbols added and removed.
# You can list of those symbols as well
# Projects that change exported symbols with each commit should not be used
# as a built or install time dependency until they stabilize.
import logging
import os
from gofedlib.utils import YELLOW, RED, BLUE, ENDC
from gofedinfra.system.core.factory.actfactory import ActFactory
from infra.system.core.factory.fakeactfactory import FakeActFactory
from gofedlib.projectsignature.parser import ProjectSignatureParser
from infra.system.artefacts.artefacts import ARTEFACT_GOLANG_PROJECTS_API_DIFF
from cmdsignature.parser import CmdSignatureParser
from gofedlib.utils import getScriptDir
def checkOptions(options):
if options.prefix != "" and options.prefix[-1] == '/':
logging.error("--prefix can not end with '/'")
exit(1)
def displayApiDifference(data, options):
color = options.color
prefix = options.prefix
data = data["data"]
def print_removed(item):
if color:
return "%s-%s%s" % (RED, item, ENDC)
else:
return "-%s" % item
def print_new(item):
if color:
return "%s+%s%s" % (BLUE, item, ENDC)
else:
return "+%s" % item
def print_updated(item):
if color:
return "%s~%s%s" % (YELLOW, item, ENDC)
else:
return "~%s" % item
# if no option set, print removed symbols
if not options.all and not options.removed and not options.new and not options.updated:
options.removed = True
new = []
removed = []
updated = []
# print removed packages
if (options.removed or options.all) and "removedpackages" in data:
for package in data["removedpackages"]:
if options.prefix == "":
line = print_removed(package)
else:
line = print_removed("%s/%s" % (options.prefix, package))
if line:
removed.append(line)
# print new packages
if (options.new or options.all) and "newpackages" in data:
for package in data["newpackages"]:
if options.prefix == "":
line = print_new(package)
else:
line = print_new("%s/%s" % (options.prefix, package))
if line:
new.append(line)
# print updated packages
if "updatedpackages" in data:
for package in data["updatedpackages"]:
if options.prefix == "":
package_name = package["package"]
else:
package_name = "%s/%s" % (options.prefix, package["package"])
for symbol_type in package:
if symbol_type == "package":
continue
if symbol_type == "functions":
prefix = "function"
elif symbol_type == "types":
prefix = "type"
elif symbol_type == "variables":
prefix = "variable"
else:
raise ValueError("Unsupported symbol type: %s" % symbol_type)
for state in package[symbol_type]:
for symbol in package[symbol_type][state]:
if state.startswith("new"):
line = print_new("%s: new %s: %s" % (package_name, prefix, symbol))
if line and (options.new or options.all):
new.append(line)
if not options.sorted:
print line
if state.startswith("removed"):
line = print_removed("%s: %s removed: %s" % (package_name, prefix, symbol))
if line and (options.removed or options.all):
removed.append(line)
if not options.sorted:
print line
if state.startswith("updated"):
line = print_updated("%s: %s updated: %s" % (package_name, prefix, symbol))
if line and (options.updated or options.all):
updated.append(line)
if not options.sorted:
print line
if options.sorted:
for line in sorted(new):
print line
for line in sorted(removed):
print line
for line in sorted(updated):
print line
if __name__ == "__main__":
cur_dir = getScriptDir(__file__)
gen_flags = "%s/%s.yml" % (cur_dir, os.path.basename(__file__).split(".")[0])
parser = CmdSignatureParser([gen_flags]).generate().parse()
if not parser.check():
exit(1)
options = parser.options()
args = parser.args()
checkOptions(options)
try:
reference_project_signature = ProjectSignatureParser().parse(options.reference)
except ValueError as e:
logging.error(e)
exit(1)
try:
compare_with_project_signature = ProjectSignatureParser().parse(options.comparewith)
except ValueError as e:
logging.error(e)
exit(1)
data = {"reference": {}, "compared_with": {}}
if reference_project_signature["provider_type"] == "upstream_repository":
data["reference"] = {
"type": "upstream_source_code",
"repository": reference_project_signature["provider"],
"commit": reference_project_signature["commit"]
}
else:
data["reference"] = {
"type": "user_directory",
"resource": reference_project_signature["provider"]["location"]
}
if compare_with_project_signature["provider_type"] == "upstream_repository":
data["compared_with"] = {
"type": "upstream_source_code",
"repository": compare_with_project_signature["provider"],
"commit": compare_with_project_signature["commit"]
}
else:
data["compared_with"] = {
"type": "user_directory",
"resource": compare_with_project_signature["provider"]["location"]
}
if options.dryrun:
act_factory = FakeActFactory()
else:
act_factory = ActFactory()
try:
data = act_factory.bake("go-exported-api-diff").call(data)
except Exception as e:
logging.error(e)
exit(1)
displayApiDifference(data[ARTEFACT_GOLANG_PROJECTS_API_DIFF], options)
| gpl-2.0 | 3,897,696,865,895,849,500 | 25.994975 | 88 | 0.671072 | false | 3.279609 | false | false | false |
redhat-openstack/python-openstackclient | openstackclient/compute/v2/floatingip.py | 1 | 2315 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Floating IP action implementations"""
from openstackclient.common import command
from openstackclient.common import utils
class AddFloatingIP(command.Command):
"""Add floating IP address to server"""
def get_parser(self, prog_name):
parser = super(AddFloatingIP, self).get_parser(prog_name)
parser.add_argument(
"ip_address",
metavar="<ip-address>",
help="IP address to add to server (name only)",
)
parser.add_argument(
"server",
metavar="<server>",
help="Server to receive the IP address (name or ID)",
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
server = utils.find_resource(
compute_client.servers, parsed_args.server)
server.add_floating_ip(parsed_args.ip_address)
class RemoveFloatingIP(command.Command):
"""Remove floating IP address from server"""
def get_parser(self, prog_name):
parser = super(RemoveFloatingIP, self).get_parser(prog_name)
parser.add_argument(
"ip_address",
metavar="<ip-address>",
help="IP address to remove from server (name only)",
)
parser.add_argument(
"server",
metavar="<server>",
help="Server to remove the IP address from (name or ID)",
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
server = utils.find_resource(
compute_client.servers, parsed_args.server)
server.remove_floating_ip(parsed_args.ip_address)
| apache-2.0 | -5,583,730,177,318,009,000 | 31.605634 | 77 | 0.642333 | false | 4.239927 | false | false | false |
better-dem/portal | legislators/management/commands/legislators_update.py | 1 | 1860 | from django.core.management.base import BaseCommand, CommandError
from legislators.models import LegislatorsProject, LegislatorsItem, BillsProject, BillsItem
import core.models as cm
import core.tasks as ct
from django.db import transaction
from django.utils import timezone
import argparse
import sys
import pyopenstates
import zipfile
import os
import json
import core.models as cm
class Command(BaseCommand):
help = """
Creates job status objects for each of the states in openstates.
A utility to update all legislators using the openstates API
usage: python manage.py legislators_update
"""
def handle(self, *args, **options):
metadata = pyopenstates.get_metadata()
num_jobs_created = 0
num_jobs_modified = 0
job_timeout = 60*60*2
for state in metadata:
obj, created = cm.LongJobState.objects.get_or_create(
app_name = "legislators",
name="update_state|{}|{}".format(state["name"], state["abbreviation"]),
defaults={"job_period":60*60*24, "job_timeout": job_timeout, "most_recent_update": timezone.now() - timezone.timedelta(24*60*60)}
)
if created:
sys.stdout.write("Created job for state: {}\n".format(state["name"]))
sys.stdout.flush()
num_jobs_created += 1
elif obj.job_timeout != job_timeout:
obj.job_timeout = job_timeout
obj.save()
sys.stdout.write("Modified timeout for state: {}\n".format(state["name"]))
sys.stdout.flush()
num_jobs_modified += 1
sys.stdout.write("Created {} jobs\n".format(num_jobs_created))
sys.stdout.write("Modified {} jobs\n".format(num_jobs_modified))
sys.stdout.write("DONE\n")
sys.stdout.flush()
| agpl-3.0 | -1,296,061,344,677,371,600 | 36.959184 | 145 | 0.626882 | false | 4.034707 | false | false | false |
renoirb/browsercompat | webplatformcompat/view_serializers.py | 1 | 31413 | # -*- coding: utf-8 -*-
"""API Serializers"""
from collections import OrderedDict
from itertools import chain
from json import dumps
from django.conf import settings
from django.core.paginator import Paginator
from drf_cached_instances.models import CachedQueryset
from rest_framework.reverse import reverse
from rest_framework.serializers import (
ModelSerializer, PrimaryKeyRelatedField, SerializerMethodField,
ValidationError)
from rest_framework.utils.serializer_helpers import ReturnDict
from tools.resources import Collection, CollectionChangeset
from .cache import Cache
from .models import (
Browser, Feature, Maturity, Section, Specification, Support, Version)
from .serializers import (
BrowserSerializer, FeatureSerializer, MaturitySerializer,
SectionSerializer, SpecificationSerializer, SupportSerializer,
VersionSerializer, omit_some)
class ViewBrowserSerializer(BrowserSerializer):
class Meta(BrowserSerializer.Meta):
fields = omit_some(BrowserSerializer.Meta.fields, 'versions')
class ViewMaturitySerializer(MaturitySerializer):
class Meta(MaturitySerializer.Meta):
fields = omit_some(MaturitySerializer.Meta.fields, 'specifications')
class ViewSectionSerializer(SectionSerializer):
class Meta(SectionSerializer.Meta):
fields = omit_some(SectionSerializer.Meta.fields, 'features')
class ViewSpecificationSerializer(SpecificationSerializer):
class Meta(SpecificationSerializer.Meta):
fields = omit_some(SpecificationSerializer.Meta.fields, 'sections')
class ViewVersionSerializer(VersionSerializer):
class Meta(VersionSerializer.Meta):
fields = omit_some(VersionSerializer.Meta.fields, 'supports')
read_only_fields = omit_some(
VersionSerializer.Meta.read_only_fields, 'supports')
# Map resource names to model, view serializer classes
view_cls_by_name = {
'features': (Feature, FeatureSerializer),
'supports': (Support, SupportSerializer),
'maturities': (Maturity, ViewMaturitySerializer),
'specifications': (Specification, ViewSpecificationSerializer),
'sections': (Section, ViewSectionSerializer),
'browsers': (Browser, ViewBrowserSerializer),
'versions': (Version, ViewVersionSerializer),
}
class ViewFeatureListSerializer(ModelSerializer):
"""Get list of features"""
url = SerializerMethodField()
def get_url(self, obj):
return reverse(
'viewfeatures-detail', kwargs={'pk': obj.id},
request=self.context['request'])
class Meta:
model = Feature
fields = (
'url', 'id', 'slug', 'mdn_uri', 'experimental', 'standardized',
'stable', 'obsolete', 'name')
class DjangoResourceClient(object):
"""Implement tools.client.Client using Django native functions"""
def url(self, resource_type, resource_id=None):
"""Use Django reverse to determine URL."""
if resource_type == 'maturities':
singular = 'maturity'
else:
singular = resource_type[:-1]
if resource_id:
return reverse(
singular + '-detail', kwargs={'pk': resource_id})
else:
return reverse(singular + '-list')
def open_changeset(self):
"""Skip opening changesets (opened at the request/view level)."""
pass
def close_changeset(self):
"""Skip closing changesets (closed at the request/view level)."""
pass
def update(self, resource_type, resource_id, resource):
model_cls, serializer_cls = view_cls_by_name[resource_type]
instance = model_cls.objects.get(id=resource_id)
data = resource.copy()
links = data.pop('links', {})
data.update(links)
serializer = serializer_cls(instance=instance, data=data)
assert serializer.is_valid(), serializer.errors
serializer.save()
def create(self, resource_type, resource):
model_cls, serializer_cls = view_cls_by_name[resource_type]
data = resource.copy()
links = data.pop('links', {})
data.update(links)
serializer = serializer_cls(data=data)
assert serializer.is_valid(), serializer.errors
obj = serializer.save()
return {'id': obj.id}
def delete(self, resource_type, resource_id):
raise NotImplementedError("delete not implemented for safety")
class FeatureExtra(object):
"""Handle new and updated data in a view_feature update"""
def __init__(self, data, feature, context):
self.data = data
self.feature = feature
self.context = context
def is_valid(self):
"""Validate the linked data"""
self.errors = {}
self._process_data()
self._validate_changes()
return not self.errors
def load_resource(self, resource_cls, data):
"""Load a resource, converting data to look like wire data
Conversions:
- Stringify IDs (5 -> "5")
- Convert Date to ISO 8601 (2015-02-17)
"""
rdata = {}
wlinks = getattr(resource_cls, '_writeable_link_fields', {})
rlinks = getattr(resource_cls, '_readonly_link_fields', {})
link_names = set(['id'] + list(wlinks.keys()) + list(rlinks.keys()))
for key, value in data.items():
if key in link_names:
if isinstance(value, list):
raw_ids = value
unlist = False
else:
raw_ids = [value]
unlist = True
ids = []
for i in raw_ids:
if i is None:
ids.append(None)
else:
ids.append(str(i))
if unlist:
rdata[key] = ids[0]
else:
rdata[key] = ids
else:
rdata[key] = value
return resource_cls(**rdata)
def _process_data(self):
"""Load the linked data and compare to current data."""
assert not hasattr(self, 'changes')
assert hasattr(self, 'errors')
r_by_t = Collection.resource_by_type
# Create and load collection of new data
new_collection = Collection()
for rtype, items in self.data.items():
resource_cls = r_by_t.get(rtype)
if resource_cls:
for seq, json_api_item in enumerate(items):
item = json_api_item.copy()
links = item.pop('links', {})
item.update(links)
resource = self.load_resource(resource_cls, item)
resource._seq = seq
new_collection.add(resource)
# Create native representation of current feature data
current_collection = Collection(DjangoResourceClient())
feature_serializer = ViewFeatureSerializer(context=self.context)
current_feature = feature_serializer.to_representation(self.feature)
current_extra = current_feature.pop('_view_extra')
del current_extra['meta']
# Load feature into new and current collection
current_feature_resource = self.load_resource(
r_by_t['features'], current_feature)
current_collection.add(current_feature_resource)
current_feature.update(self.feature._in_extra)
current_feature['id'] = str(current_feature['id'])
resource_feature = self.load_resource(
r_by_t['features'], current_feature)
resource_feature._seq = None
new_collection.add(resource_feature)
# Populate collection of current data
for rtype, items in current_extra.items():
resource_cls = r_by_t[rtype]
for item in items:
resource = self.load_resource(resource_cls, item)
current_collection.add(resource)
# Add existing items not explicit in PUT content
# This avoids 'delete' changes
new_items = new_collection.get_all_by_data_id()
for data_id, item in current_collection.get_all_by_data_id().items():
if data_id not in new_items:
resource = r_by_t[item._resource_type]()
resource.from_json_api(item.to_json_api())
resource._seq = None
new_collection.add(resource)
# Add existing items used in new collection to current collection
# This avoids incorrect 'new' changes
existing_items = current_collection.get_all_by_data_id()
for data_id, item in new_collection.get_all_by_data_id().items():
if item.id:
item_id = item.id.id
int_id = None
existing_item = existing_items.get(data_id)
try:
int_id = int(item_id)
except ValueError:
pass
if int_id and (existing_item is None):
rtype = item._resource_type
resource_cls = r_by_t[rtype]
model_cls, serializer_cls = view_cls_by_name[rtype]
obj = model_cls.objects.get(id=int_id)
serializer = serializer_cls()
data = serializer.to_representation(obj)
resource = self.load_resource(resource_cls, data)
current_collection.add(resource)
# Load the diff
self.changeset = CollectionChangeset(
current_collection, new_collection)
assert not self.changeset.changes.get('deleted')
def add_error(self, resource_type, seq, error_dict):
"""Add a validation error for a linked resource."""
self.errors.setdefault(
resource_type, {}).setdefault(seq, {}).update(error_dict)
def _validate_changes(self):
"""Validate the changes.
Validation includes:
- Field validation of properties
- Disallow adding features outside of the target feature's subtree
- Disallow additions of maturities
Validation of links is not attempted, since most validation errors
will be relations to new resources. This may miss links to
"existing" resources that aren't in the database, but those will
be DoesNotExist exceptions in _process_data.
"""
assert hasattr(self, 'changeset')
assert hasattr(self, 'errors')
assert not self.errors
new_collection = self.changeset.new_collection
resource_feature = new_collection.get('features', str(self.feature.id))
# Validate with DRF serializers
for data_id, item in new_collection.get_all_by_data_id().items():
rtype = item._resource_type
model_cls, serializer_cls = view_cls_by_name[rtype]
seq = getattr(item, '_seq')
if seq is None:
continue
# Does the ID imply an existing instance?
int_id = None
instance = None
assert item.id
item_id = item.id.id
try:
int_id = int(item_id)
except ValueError:
pass
else:
instance = model_cls.objects.get(id=int_id)
# Validate the data with DRF serializer
data = item.to_json_api()[rtype]
links = data.pop('links', {})
data.update(links)
serializer = serializer_cls(instance=instance, data=data)
if not serializer.is_valid():
errors = {}
# Discard errors in link fields, for now
for fieldname, error in serializer.errors.items():
if fieldname not in links:
errors[fieldname] = error
if errors:
self.add_error(rtype, seq, errors)
# Validate that features are in the feature tree
target_id = resource_feature.id.id
for feature in new_collection.get_resources('features'):
if feature.id.id == target_id:
continue
f = feature
while (f and f.parent is not None and
f.parent.id != target_id):
f = new_collection.get('features', f.parent.id)
if f is None or f.parent.id is None:
error = (
"Feature must be a descendant of feature %s." % target_id)
self.add_error('features', feature._seq, {'parent': error})
# Validate that "expert" objects are not added
expert_resources = set((
'maturities', 'specifications', 'versions', 'browsers'))
add_error = (
'Resource can not be created as part of this update. Create'
' first, and try again.')
for item in self.changeset.changes['new'].values():
if item._resource_type in expert_resources:
self.add_error(
item._resource_type, item._seq, {'id': add_error})
# Validate that "expert" objects are not changed
change_err = (
'Field can not be changed from %s to %s as part of this update.'
' Update the resource by itself, and try again.')
for item in self.changeset.changes['changed'].values():
if item._resource_type in expert_resources:
rtype = item._resource_type
new_json = dict(item.to_json_api()[rtype])
new_json.update(new_json.pop('links', {}))
orig_json = dict(item._original.to_json_api()[rtype])
orig_json.update(orig_json.pop('links', {}))
for key, value in orig_json.items():
if value != new_json.get(key, "(missing)"):
err = change_err % (dumps(value), dumps(new_json[key]))
self.add_error(rtype, item._seq, {key: err})
def save(self, **kwargs):
"""Commit changes to linked data"""
self.changeset.change_original_collection()
# Adding sub-features will change the MPTT tree through direct SQL.
# Load the new tree data from the database before parent serializer
# overwrites it with old values.
tree_attrs = ['lft', 'rght', 'tree_id', 'level', 'parent']
db_feature = Feature.objects.only(*tree_attrs).get(id=self.feature.id)
for attr in tree_attrs:
setattr(self.feature, attr, getattr(db_feature, attr))
class ViewFeatureExtraSerializer(ModelSerializer):
"""Linked resources and metadata for ViewFeatureSerializer."""
browsers = ViewBrowserSerializer(source='all_browsers', many=True)
features = FeatureSerializer(source='child_features', many=True)
maturities = ViewMaturitySerializer(source='all_maturities', many=True)
sections = ViewSectionSerializer(source='all_sections', many=True)
specifications = ViewSpecificationSerializer(source='all_specs', many=True)
supports = SupportSerializer(source='all_supports', many=True)
versions = ViewVersionSerializer(source='all_versions', many=True)
meta = SerializerMethodField()
def add_sources(self, obj):
"""Add the sources used by the serializer fields."""
page = self.context['request'].GET.get('page', 1)
per_page = settings.PAGINATE_VIEW_FEATURE
if self.context['include_child_pages']:
# Paginate the full descendant tree
child_queryset = self.get_all_descendants(obj, per_page)
paginated_child_features = Paginator(child_queryset, per_page)
obj.page_child_features = paginated_child_features.page(page)
obj.child_features = obj.page_child_features.object_list
else:
            # Just the row-level descendants, but un-paginated
child_queryset = self.get_row_descendants(obj)
obj.child_features = list(child_queryset.all())
# Load the remaining related instances
section_pks = set(obj.sections.values_list('id', flat=True))
support_pks = set(obj.supports.values_list('id', flat=True))
for feature in obj.child_features:
section_pks.update(feature.sections.values_list('id', flat=True))
support_pks.update(feature.supports.values_list('id', flat=True))
obj.all_sections = list(CachedQueryset(
Cache(), Section.objects.all(), sorted(section_pks)))
obj.all_supports = list(CachedQueryset(
Cache(), Support.objects.all(), sorted(support_pks)))
specification_pks = set()
for section in obj.all_sections:
specification_pks.add(section.specification.pk)
obj.all_specs = list(CachedQueryset(
Cache(), Specification.objects.all(), sorted(specification_pks)))
maturity_pks = set()
for specification in obj.all_specs:
maturity_pks.add(specification.maturity.pk)
obj.all_maturities = list(CachedQueryset(
Cache(), Maturity.objects.all(), sorted(maturity_pks)))
version_pks = set()
for support in obj.all_supports:
version_pks.add(support.version.pk)
obj.all_versions = list(CachedQueryset(
Cache(), Version.objects.all(), sorted(version_pks)))
browser_pks = set()
for version in obj.all_versions:
browser_pks.add(version.browser.pk)
obj.all_browsers = list(CachedQueryset(
Cache(), Browser.objects.all(), sorted(browser_pks)))
def get_all_descendants(self, obj, per_page):
"""Return a CachedQueryset of all the descendants
This includes row features that model rows in the MDN table,
and page features that model sub-pages on MDN, which may have
row and subpage features of their own.
"""
if isinstance(obj, Feature):
# It's a real Feature, not a cached proxy Feature
obj.descendant_count = obj.get_descendant_count()
descendant_pks = obj.get_descendants().values_list('pk', flat=True)
elif obj.descendant_count <= per_page:
# The cached PK list is enough to populate descendant_pks
descendant_pks = obj.descendants.values_list('id', flat=True)
else:
# Load the real object to get the full list of descendants
real_obj = Feature.objects.get(id=obj.id)
descendant_pks = real_obj.get_descendants().values_list(
'pk', flat=True)
return CachedQueryset(
Cache(), Feature.objects.all(), descendant_pks)
def get_row_descendants(self, obj):
"""Return a CachedQueryset of just the row descendants
This includes row features, and subfeatures of rows that are also
row features.
        See http://bit.ly/1MUSEFL for one example of splitting a large table
into a hierarchy of features.
"""
row_descendant_pks = obj.row_descendants.values_list('id', flat=True)
return CachedQueryset(
Cache(), Feature.objects.all(), row_descendant_pks)
def to_representation(self, obj):
"""Add addditonal data for the ViewFeatureSerializer.
For most features, all the related data is cachable, and no database
reads are required with a warm cache.
For some features, such as the root node for CSS, the subtree is huge,
and the descendant feature PKs won't fit in the cache. In these
cases, a couple of database reads are required to get the
descendant feature PKs, which are then paginated to reduce the huge
amount of related data.
"""
# Load the paginated descendant features
if obj is None:
# This happens when OPTIONS is called from browsable API
return None
self.add_sources(obj)
ret = super(ViewFeatureExtraSerializer, self).to_representation(obj)
return ReturnDict(ret, serializer=self)
def find_languages(self, obj):
"""Find languages used in feature view."""
languages = set()
def add_langs(item):
if hasattr(item, 'keys'): # pragma: nocover
languages.update(item.keys())
for browser in obj.all_browsers:
add_langs(browser.name)
add_langs(browser.note)
for feature in chain([obj], obj.child_features):
add_langs(feature.mdn_uri)
add_langs(feature.name)
for maturity in obj.all_maturities:
add_langs(maturity.name)
for section in obj.all_sections:
add_langs(section.number)
add_langs(section.name)
add_langs(section.subpath)
add_langs(section.note)
for spec in obj.all_specs:
add_langs(spec.name)
add_langs(spec.uri)
for support in obj.all_supports:
add_langs(support.note)
for version in obj.all_versions:
add_langs(version.release_notes_uri)
add_langs(version.note)
if 'zxx' in languages:
# No linguistic content
languages.remove('zxx')
if 'en' in languages:
languages.remove('en')
return ['en'] + sorted(languages)
else:
return sorted(languages)
def significant_changes(self, obj):
"""Determine what versions are important for support changes.
A version is important if it is the first version with support
information, or it changes support from the previous version.
"""
# Create lookup of ID/PK -> instances
browsers = {}
for browser in obj.all_browsers:
# Cache version order
browser.version_ids = browser.versions.values_list('id', flat=True)
browsers[browser.id] = browser
versions = dict(
[(version.id, version) for version in obj.all_versions])
features = dict(
[(feature.id, feature) for feature in obj.child_features])
features[obj.id] = obj
# Create index of supported browser / version / features
supported = []
for support in obj.all_supports:
version = versions[support.version.pk]
browser = browsers[version.browser.pk]
version_order = browser.version_ids.index(version.id)
feature = features[support.feature.pk]
support_attrs = (
support.support,
support.prefix,
support.prefix_mandatory,
support.alternate_name,
support.alternate_mandatory,
support.requires_config,
support.default_config,
support.protected,
repr(support.note),
)
supported.append((
feature.id, browser.id, version_order, version.id,
support.id, support_attrs))
supported.sort()
# Identify significant browser / version / supports by feature
sig_features = {}
last_f_id = None
last_b_id = None
last_support = None
for f_id, b_id, _, v_id, s_id, support in supported:
if last_f_id != f_id:
last_support = None
last_f_id = f_id
if last_b_id != b_id:
last_support = None
last_b_id = b_id
if last_support != support:
sig_feature = sig_features.setdefault(f_id, OrderedDict())
sig_browser = sig_feature.setdefault(str(b_id), [])
sig_browser.append(str(s_id))
last_support = support
# Order significant features
significant_changes = OrderedDict()
for f_id in chain([obj.id], [f.id for f in obj.child_features]):
significant_changes[str(f_id)] = sig_features.get(f_id, {})
return significant_changes
def browser_tabs(self, obj):
"""Section and order the browser tabs.
TODO: Move this logic into the database, API
"""
known_browsers = dict((
('chrome', ('Desktop Browsers', 1)),
('firefox', ('Desktop Browsers', 2)),
('internet_explorer', ('Desktop Browsers', 3)),
('opera', ('Desktop Browsers', 4)),
('safari', ('Desktop Browsers', 5)),
('android', ('Mobile Browsers', 6)),
('chrome_for_android', ('Mobile Browsers', 7)),
('chrome_mobile', ('Mobile Browsers', 8)),
('firefox_mobile', ('Mobile Browsers', 9)),
('ie_mobile', ('Mobile Browsers', 10)),
('opera_mini', ('Mobile Browsers', 11)),
('opera_mobile', ('Mobile Browsers', 12)),
('safari_mobile', ('Mobile Browsers', 13)),
('blackberry', ('Mobile Browsers', 14)),
('firefox_os', ('Non-Browser Environments', 15)),
))
next_other = 16
sections = [
'Desktop Browsers', 'Mobile Browsers', 'Non-Browser Environments']
raw_tabs = dict((section, []) for section in sections)
for browser in obj.all_browsers:
try:
section, order = known_browsers[browser.slug]
except KeyError:
section, order = ('Non-Browser Environments', next_other)
next_other += 1
raw_tabs[section].append((order, browser.id))
tabs = []
for section in sections:
browsers = raw_tabs[section]
if browsers:
browsers.sort()
tabs.append(OrderedDict((
('name', {'en': section}),
('browsers', [str(pk) for _, pk in browsers]),
)))
return tabs
def pagination(self, obj):
"""
Determine pagination for large feature trees.
If page children are not included (the default), then no pagination is
used, but the pagination object remains to make client logic easier.
"""
pagination = OrderedDict((
('previous', None),
('next', None),
))
if self.context['include_child_pages']:
# When full descendant list, use pagination
# The list can get huge when asking for root features like web-css
pagination['count'] = obj.descendant_count
url_kwargs = {'pk': obj.id}
if self.context['format']:
url_kwargs['format'] = self.context['format']
request = self.context['request']
url = reverse(
'viewfeatures-detail', kwargs=url_kwargs, request=request)
if obj.page_child_features.has_previous():
page = obj.page_child_features.previous_page_number()
pagination['previous'] = (
"%s?child_pages=1&page=%s" % (url, page))
if obj.page_child_features.has_next():
page = obj.page_child_features.next_page_number()
pagination['next'] = (
"%s?child_pages=1&page=%s" % (url, page))
else:
            # Don't paginate results. The client probably wants to generate a
# complete table, so pagination would get in the way.
pagination['count'] = len(obj.child_features)
return {'linked.features': pagination}
def ordered_notes(self, obj, sig_features, tabs):
"""Gather notes from significant features."""
supports = dict(
[(str(support.id), support) for support in obj.all_supports])
notes = []
for browsers in sig_features.values():
for section in tabs:
for browser in section['browsers']:
sig_supports = browsers.get(browser, [])
for sig_support_pk in sig_supports:
support = supports[sig_support_pk]
if support.note:
notes.append(sig_support_pk)
return OrderedDict((note, i) for i, note in enumerate(notes, 1))
def get_meta(self, obj):
"""Assemble the metadata for the feature view."""
significant_changes = self.significant_changes(obj)
browser_tabs = self.browser_tabs(obj)
include_child_pages = self.context['include_child_pages']
pagination = self.pagination(obj)
languages = self.find_languages(obj)
notes = self.ordered_notes(
obj, significant_changes, browser_tabs)
meta = OrderedDict((
('compat_table', OrderedDict((
('supports', significant_changes),
('tabs', browser_tabs),
('child_pages', include_child_pages),
('pagination', pagination),
('languages', languages),
('notes', notes),
))),))
return meta
def to_internal_value(self, data):
self.instance = self.parent.instance
assert self.instance
self.add_sources(self.instance)
self.instance._in_extra = self.parent._in_extra
extra = FeatureExtra(data, self.instance, self.context)
if extra.is_valid():
return {'_view_extra': extra}
else:
assert extra.errors
raise ValidationError(extra.errors)
class Meta:
model = Feature
fields = (
'browsers', 'versions', 'supports', 'maturities',
'specifications', 'sections', 'features', 'meta')
class ViewFeatureSerializer(FeatureSerializer):
"""Feature Serializer, plus related data and MDN browser compat logic"""
_view_extra = ViewFeatureExtraSerializer(source='*')
class Meta(FeatureSerializer.Meta):
fields = FeatureSerializer.Meta.fields + ('_view_extra',)
def to_internal_value(self, data):
self._in_extra = {
'sections': data.pop('sections', []),
'supports': data.pop('supports', []),
'children': data.pop('children', []),
}
data = super(ViewFeatureSerializer, self).to_internal_value(data)
return data
def save(self, *args, **kwargs):
"""Save the feature plus linked elements.
The save is done using DRF conventions; the _view_extra field is set
        to an object (FeatureExtra) that will save linked elements. The only
wrinkle is that the changeset should not be auto-closed by any saved
items.
"""
changeset = self.context['request'].changeset
if changeset.id:
# Already in an open changeset - client will close
close_changeset = False
else:
close_changeset = True
assert not changeset.user_id
changeset.user = self.context['request'].user
changeset.save()
ret = super(ViewFeatureSerializer, self).save(*args, **kwargs)
if hasattr(ret, '_view_extra'):
ret._view_extra.save(*args, **kwargs)
if close_changeset:
changeset.closed = True
changeset.save()
return ret
class ViewFeatureRowChildrenSerializer(ViewFeatureSerializer):
"""Adjust serializer when page children are omitted."""
children = PrimaryKeyRelatedField(
many=True, queryset=Feature.objects.all(), source='row_children')
| mpl-2.0 | -3,760,999,422,597,768,700 | 39.067602 | 79 | 0.587496 | false | 4.408223 | false | false | false |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/blob_circles.py | 1 | 2064 | def detect_circles(np_image):
"""
Uses Hough transform to detect the radii and the
centres of the "blobs" indicating the point of
contact between the spheres
"""
import numpy as np
import pylab as pl
from skimage.transform import hough_circle
from skimage.feature import peak_local_max
from skimage.draw import circle_perimeter
pl.close('all')
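    # Consider candidate radii between one quarter and one half of the larger
    # image dimension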
min_rad = int(max(np_image.shape[0], np_image.shape[1]) / 4.0)
max_rad = int(max(np_image.shape[0], np_image.shape[1]) / 2.0)
step = 1
hough_radii = np.arange(min_rad, max_rad, step, np.float64)
hough_res = hough_circle(np_image, hough_radii)
centers = []
accums = []
radii = []
circles = [] # to get the outlines of the circles
C = [] # to get the centres of the circles, in relation to the different areas
# For each radius, extract one circle
for radius, h in zip(hough_radii, hough_res):
peaks = peak_local_max(h, num_peaks=1)
centers.extend(peaks)
accums.extend(h[peaks[:, 0], peaks[:, 1]])
radii.extend([radius])
for idx in np.argsort(accums)[::-1][:1]:
center_x, center_y = centers[idx]
C.append((center_x, center_y))
radius = radii[idx]
cx, cy = circle_perimeter(center_y, center_x, np.int64(radius))
circles.append((cy, cx))
#np_image[circles[0][0], circles[0][1]] = 0
# pl.imshow(np_image)
# pl.title('Circle detection on real image using Hough transform\n- optimised with image labelling algorithm -', fontdict={'fontsize': 20,'verticalalignment': 'baseline','horizontalalignment': 'center'})
# pl.colorbar()
# pl.show()
# C_cp = C
# C = []
#
# if radius % 2 != 0:
# C.append((C_cp[0][0] + 0.5, C_cp[0][1] + 0.5))
# elif radius % 2 != 0:
# C.append((C_cp[0][0] + 0.5, C_cp[0][1]))
# elif radius % 2 != 0:
# C.append((C_cp[0][0], C_cp[0][1] + 0.5))
# else:
# C.append((C_cp[0][0], C_cp[0][1]))
return C
| apache-2.0 | -4,499,408,939,068,151,300 | 32.836066 | 207 | 0.573159 | false | 3 | false | false | false |
kawamon/hue | desktop/core/ext-py/defusedxml-0.5.0/setup.py | 2 | 1784 | #!/usr/bin/env python
from __future__ import absolute_import
import sys
from distutils.core import Command
import subprocess
from setuptools import setup
import defusedxml
class PyTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
errno = subprocess.call([sys.executable, "tests.py"])
raise SystemExit(errno)
long_description = []
with open("README.txt") as f:
long_description.append(f.read())
with open("CHANGES.txt") as f:
long_description.append(f.read())
setup(
name="defusedxml",
version=defusedxml.__version__,
cmdclass={"test": PyTest},
packages=["defusedxml"],
author="Christian Heimes",
author_email="[email protected]",
maintainer="Christian Heimes",
maintainer_email="[email protected]",
url="https://github.com/tiran/defusedxml",
download_url="https://pypi.python.org/pypi/defusedxml",
keywords="xml bomb DoS",
platforms="all",
license="PSFL",
description="XML bomb protection for Python stdlib modules",
long_description="\n".join(long_description),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Python Software Foundation License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Text Processing :: Markup :: XML",
],
)
| apache-2.0 | -5,821,807,950,419,019,000 | 27.31746 | 72 | 0.641256 | false | 4.018018 | false | false | false |
torymur/python-hubstorage | tests/test_batchuploader.py | 2 | 2377 | """
Test Project
"""
import time
from collections import defaultdict
from hstestcase import HSTestCase
from hubstorage import ValueTooLarge
class BatchUploaderTest(HSTestCase):
def _job_and_writer(self, **writerargs):
self.project.push_job(self.spidername)
job = self.start_job()
bu = self.hsclient.batchuploader
w = bu.create_writer(job.items.url, auth=self.auth, **writerargs)
return job, w
def test_writer_batchsize(self):
job, w = self._job_and_writer(size=10)
for x in xrange(111):
w.write({'x': x})
w.close()
# this works only for small batches (previous size=10 and small data)
# as internally HS may commit a single large request as many smaller
# commits, each with different timestamps
groups = defaultdict(int)
for doc in job.items.list(meta=['_ts']):
groups[doc['_ts']] += 1
self.assertEqual(len(groups), 12)
def test_writer_maxitemsize(self):
job, w = self._job_and_writer()
m = w.maxitemsize
self.assertRaisesRegexp(
ValueTooLarge,
'Value exceeds max encoded size of 1048576 bytes:'
' \'{"b": "x+\\.\\.\\.\'',
w.write, {'b': 'x' * m})
self.assertRaisesRegexp(
ValueTooLarge,
'Value exceeds max encoded size of 1048576 bytes:'
' \'{"b+\\.\\.\\.\'',
w.write, {'b'*m: 'x'})
self.assertRaisesRegexp(
ValueTooLarge,
'Value exceeds max encoded size of 1048576 bytes:'
' \'{"b+\\.\\.\\.\'',
w.write, {'b'*(m/2): 'x'*(m/2)})
def test_writer_contentencoding(self):
for ce in ('identity', 'gzip'):
job, w = self._job_and_writer(content_encoding=ce)
for x in xrange(111):
w.write({'x': x})
w.close()
self.assertEqual(job.items.stats()['totals']['input_values'], 111)
def test_writer_interval(self):
job, w = self._job_and_writer(size=1000, interval=1)
for x in xrange(111):
w.write({'x': x})
if x == 50:
time.sleep(2)
w.close()
groups = defaultdict(int)
for doc in job.items.list(meta=['_ts']):
groups[doc['_ts']] += 1
self.assertEqual(len(groups), 2)
| bsd-3-clause | 5,465,130,919,629,342,000 | 32.013889 | 78 | 0.543122 | false | 3.791069 | true | false | false |
robintw/Py6S | doc/source/conf.py | 1 | 8291 | # -*- coding: utf-8 -*-
#
# Py6S documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 16 12:07:44 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import mock
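# Mock out heavy scientific dependencies so Sphinx autodoc can import Py6S
# even when they are not installed (a common pattern for docs builds, e.g. on
# Read the Docs).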
MOCK_MODULES = [
"numpy",
"scipy",
"matplotlib",
"matplotlib.pyplot",
"scipy.interpolate",
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, "C:\_Work\Py6S\py6s")
sys.path.insert(0, "/Users/robin/Documents/University/Py6S/py6s")
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Py6S"
copyright = "2012, Robin Wilson"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.9.0"
# The full version, including alpha/beta/rc tags.
release = "1.9.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Py6Sdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
"papersize": "a4paper",
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "Py6S.tex", "Py6S Documentation", "Robin Wilson", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "py6s", "Py6S Documentation", ["Robin Wilson"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Py6S",
"Py6S Documentation",
"Robin Wilson",
"Py6S",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
autodoc_member_order = "bysource"
| lgpl-3.0 | 8,862,743,489,198,015,000 | 30.645038 | 84 | 0.693402 | false | 3.724618 | true | false | false |
codervikash/algorithms | Python/Graphs/kosaraju_strongly_connected_components.py | 1 | 2142 | """
A directed graph is strongly connected if there is a path between all pairs of vertices. A strongly connected component (SCC) of a directed graph is a maximal strongly connected subgraph.
Time Complexity: The above algorithm calls DFS, fins reverse of the graph and again calls DFS. DFS takes O(V+E) for a graph represented using adjacency list. Reversing a graph also takes O(V+E) time. For reversing the graph, we simple traverse all adjacency lists.
Time Complexity: O(V + E)
"""
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.V = vertices
self.graph = defaultdict(list)
def add_edge(self, u, v):
self.graph[u].append(v)
def fill_order(self, vertex, visited, stack):
visited[vertex] = True
for neighbour in self.graph[vertex]:
if visited[neighbour] == False:
self.fill_order(neighbour, visited, stack)
stack.append(vertex)
def get_traspose(self):
g = Graph(self.V)
for u in self.graph:
for v in self.graph[u]:
g.add_edge(v, u)
return g
def dfs_util(self, vertex, visited, curr_res):
visited[vertex] = True
curr_res.append(vertex)
for u in self.graph[vertex]:
if visited[u] == False:
self.dfs_util(u, visited, curr_res)
def get_strongly_connected_component(self):
stack = []
result = []
visited = [False for i in xrange(self.V)]
for u in xrange(self.V):
if visited[u] == False:
self.fill_order(u, visited, stack)
transposed_graph = self.get_traspose()
visited = [False for i in xrange(self.V)]
while stack:
vertex = stack.pop()
if visited[vertex] == False:
curr_res = []
transposed_graph.dfs_util(vertex, visited, curr_res)
result.append(curr_res)
return result
g = Graph(5)
g.add_edge(1, 0)
g.add_edge(0, 2)
g.add_edge(2, 1)
g.add_edge(0, 3)
g.add_edge(3, 4)
print g.get_strongly_connected_component()
| mit | 6,052,473,931,468,073,000 | 25.444444 | 264 | 0.598506 | false | 3.59396 | false | false | false |
PrincessTeruko/MHAGr | tools_(bakaneko)/transfer.py | 1 | 1492 | """
Copies names (value 1 in 1:2:3) from source file to target file
Requires manual use
"""
import os, sys
verbose = True # If True, print every replacement
pretend = False # If True, don't actually write
# The two files to be worked with
#source = "../../../mhag-read-only/src/org/mhag/model/data/mh3g/armor.dat"
#target = "../src/org/mhag/model/data/mh3g/armor.dat"
source = "../src/org/mhag/model/data/mh3g/armor.dat"
target = "../src/org/mhag/model/data/mh3g/armor_item.dat"
# Prevent cross contamination
try:
os.remove(source + ".new")
os.remove(target + ".new")
except OSError:
pass
# Read both files, line by line
with open(source) as f:
source_data = f.readlines()
with open(target) as f:
target_data = f.readlines()
# Generate list of armor names from source
source_names = []
for line in source_data:
# Exclude comments and blank lines
if line[0] != "#" and line[0] != "\n":
source_names.append(line.split(":")[0])
source_data = None
# Replace the names in the target
i = 0
for line in target_data:
# Exclude comments and blank lines
if line[0] != "#" and line[0] != "\n":
name = line.split(":")[0]
if verbose: print(name + "|" + source_names[i])
new_line = line.replace(name, source_names[i])
# Write replaced line to temporary file
with open(target + ".new", "a") as f:
f.write(new_line)
# Increment
i += 1
# Replace target file with temporary file
if not pretend:
os.remove(target)
os.rename(target + ".new", target)
| gpl-3.0 | -4,383,449,597,194,457,600 | 23.866667 | 74 | 0.664879 | false | 2.880309 | false | false | false |
ARISE-Initiative/robosuite | robosuite/models/arenas/wipe_arena.py | 1 | 7468 | import numpy as np
from robosuite.models.arenas import TableArena
from robosuite.utils.mjcf_utils import CustomMaterial, find_elements
from robosuite.models.objects import CylinderObject
class WipeArena(TableArena):
"""
Workspace that contains an empty table with visual markers on its surface.
Args:
table_full_size (3-tuple): (L,W,H) full dimensions of the table
table_friction (3-tuple): (sliding, torsional, rolling) friction parameters of the table
table_offset (3-tuple): (x,y,z) offset from center of arena when placing table.
Note that the z value sets the upper limit of the table
coverage_factor (float): Fraction of table that will be sampled for dirt placement
num_markers (int): Number of dirt (peg) particles to generate in a path on the table
table_friction_std (float): Standard deviation to sample for the peg friction
line_width (float): Diameter of dirt path trace
two_clusters (bool): If set, will generate two separate dirt paths with half the number of sensors in each
"""
def __init__(
self,
table_full_size=(0.8, 0.8, 0.05),
table_friction=(0.01, 0.005, 0.0001),
table_offset=(0, 0, 0.8),
coverage_factor=0.9,
num_markers=10,
table_friction_std=0,
line_width=0.02,
two_clusters=False
):
# Tactile table-specific features
self.table_friction_std = table_friction_std
self.line_width = line_width
self.markers = []
self.coverage_factor = coverage_factor
self.num_markers = num_markers
self.two_clusters = two_clusters
# Attribute to hold current direction of sampled dirt path
self.direction = None
# run superclass init
super().__init__(
table_full_size=table_full_size,
table_friction=table_friction,
table_offset=table_offset,
)
def configure_location(self):
"""Configures correct locations for this arena"""
# Run superclass first
super().configure_location()
# Define start position for drawing the line
pos = self.sample_start_pos()
# Define dirt material for markers
tex_attrib = {
"type": "cube",
}
mat_attrib = {
"texrepeat": "1 1",
"specular": "0.0",
"shininess": "0.0",
}
dirt = CustomMaterial(
texture="Dirt",
tex_name="dirt",
mat_name="dirt_mat",
tex_attrib=tex_attrib,
mat_attrib=mat_attrib,
shared=True,
)
# Define line(s) drawn on table
for i in range(self.num_markers):
# If we're using two clusters, we resample the starting position and direction at the halfway point
if self.two_clusters and i == int(np.floor(self.num_markers / 2)):
pos = self.sample_start_pos()
marker_name = f'contact{i}'
marker = CylinderObject(
name=marker_name,
size=[self.line_width / 2, 0.001],
rgba=[1, 1, 1, 1],
material=dirt,
obj_type="visual",
joints=None,
)
# Manually add this object to the arena xml
self.merge_assets(marker)
table = find_elements(root=self.worldbody, tags="body", attribs={"name": "table"}, return_first=True)
table.append(marker.get_obj())
# Add this marker to our saved list of all markers
self.markers.append(marker)
# Add to the current dirt path
pos = self.sample_path_pos(pos)
def reset_arena(self, sim):
"""
Reset the visual marker locations in the environment. Requires @sim (MjSim) reference to be passed in so that
the Mujoco sim can be directly modified
Args:
sim (MjSim): Simulation instance containing this arena and visual markers
"""
# Sample new initial position and direction for generated marker paths
pos = self.sample_start_pos()
# Loop through all visual markers
for i, marker in enumerate(self.markers):
# If we're using two clusters, we resample the starting position and direction at the halfway point
if self.two_clusters and i == int(np.floor(self.num_markers / 2)):
pos = self.sample_start_pos()
# Get IDs to the body, geom, and site of each marker
body_id = sim.model.body_name2id(marker.root_body)
geom_id = sim.model.geom_name2id(marker.visual_geoms[0])
site_id = sim.model.site_name2id(marker.sites[0])
# Determine new position for this marker
position = np.array([pos[0], pos[1], self.table_half_size[2]])
# Set the current marker (body) to this new position
sim.model.body_pos[body_id] = position
# Reset the marker visualization -- setting geom rgba alpha value to 1
sim.model.geom_rgba[geom_id][3] = 1
# Hide the default visualization site
sim.model.site_rgba[site_id][3] = 0
# Sample next values in local marker trajectory
pos = self.sample_path_pos(pos)
def sample_start_pos(self):
"""
Helper function to return sampled start position of a new dirt (peg) location
Returns:
np.array: the (x,y) value of the newly sampled dirt starting location
"""
# First define the random direction that we will start at
self.direction = np.random.uniform(-np.pi, np.pi)
return np.array(
(
np.random.uniform(
-self.table_half_size[0] * self.coverage_factor + self.line_width / 2,
self.table_half_size[0] * self.coverage_factor - self.line_width / 2),
np.random.uniform(
-self.table_half_size[1] * self.coverage_factor + self.line_width / 2,
self.table_half_size[1] * self.coverage_factor - self.line_width / 2)
)
)
def sample_path_pos(self, pos):
"""
Helper function to add a sampled dirt (peg) position to a pre-existing dirt path, whose most
recent dirt position is defined by @pos
Args:
pos (np.array): (x,y) value of most recent dirt position
Returns:
np.array: the (x,y) value of the newly sampled dirt position to add to the current dirt path
"""
# Random chance to alter the current dirt direction
if np.random.uniform(0, 1) > 0.7:
self.direction += np.random.normal(0, 0.5)
posnew0 = pos[0] + 0.005 * np.sin(self.direction)
posnew1 = pos[1] + 0.005 * np.cos(self.direction)
# We keep resampling until we get a valid new position that's on the table
while abs(posnew0) >= self.table_half_size[0] * self.coverage_factor - self.line_width / 2 or \
abs(posnew1) >= self.table_half_size[1] * self.coverage_factor - self.line_width / 2:
self.direction += np.random.normal(0, 0.5)
posnew0 = pos[0] + 0.005 * np.sin(self.direction)
posnew1 = pos[1] + 0.005 * np.cos(self.direction)
# Return this newly sampled position
return np.array((posnew0, posnew1))
| mit | 4,546,666,403,218,665,000 | 40.032967 | 117 | 0.588645 | false | 3.92021 | false | false | false |
abinit/abinit | scripts/post_processing/ElectronPhononCoupling/ElectronPhononCoupling/tests/test_LiF_g4.py | 2 | 2946 | from __future__ import print_function
from os.path import join as pjoin
from copy import copy
from . import SETest
from ..data.LiF_g4 import nqpt, wtq, fnames, refdir
class Test_LiF_g4(SETest):
common = dict(
temperature = False,
renormalization = False,
broadening = False,
self_energy = False,
spectral_function = False,
dynamical = True,
split_active = True,
double_grid = False,
write = True,
verbose = False,
nqpt = nqpt,
wtq = wtq,
smearing_eV = 0.01,
temp_range = [0, 300, 300],
omega_range = [-0.1, 0.1, 0.001],
rootname = 'epc.out',
**fnames)
@property
def refdir(self):
return refdir
# ZPR
def test_zpr_dyn(self):
"""Dynamical zero-point renormalization"""
self.run_compare_nc(
function = self.get_zpr_dyn,
key = 'zero_point_renormalization',
)
#def generate_zpr_dyn(self):
# """Generate epc data for this test."""
# return self.generate_test_ref(self.get_zpr_dyn)
def test_tdr_dyn(self):
"""Dynamical temperature dependent renormalization"""
self.run_compare_nc(
function = self.get_tdr_dyn,
key = 'temperature_dependent_renormalization',
)
# ZPR
def test_zpr_stat_mode(self):
"""Dynamical zero-point renormalization"""
self.run_compare_nc(
function = self.get_zpr_stat_mode,
key = 'zero_point_renormalization_by_modes',
)
def test_zpb_dyn(self):
"""Dynamical ZP Brd"""
self.run_compare_nc(
function = self.get_zpb_dyn,
key = 'zero_point_broadening',
)
def test_tdb_dyn(self):
"""Dynamical TD Brd"""
self.run_compare_nc(
function = self.get_tdb_dyn,
key = 'temperature_dependent_broadening',
)
def test_zpb_stat(self):
"""Static ZP Brd"""
self.run_compare_nc(
function = self.get_zpb_stat,
key = 'zero_point_broadening',
)
def test_tdb_stat(self):
"""Dynamical TD Brd"""
self.run_compare_nc(
function = self.get_tdb_stat,
key = 'temperature_dependent_broadening',
)
#def generate_tdr_dyn(self):
# return self.generate_test_ref(self.get_tdr_dyn)
# All
def generate(self):
"""Generate epc data for all tests."""
print('Generating reference data for tests in directory: {}'.format(
self.refdir))
for function in (
self.get_zpr_dyn,
self.get_tdr_dyn,
self.get_zpr_stat_mode,
self.get_zpb_dyn,
self.get_tdb_dyn,
self.get_zpb_stat,
self.get_tdb_stat,
):
self.generate_ref(function)
| gpl-3.0 | -3,998,620,104,432,975,400 | 25.540541 | 76 | 0.535642 | false | 3.494662 | true | false | false |
Applied-GeoSolutions/geokit | account/models.py | 1 | 1368 | from django.contrib.gis.db import models
from tenant_schemas.models import TenantMixin
from django.contrib.auth.models import User
import re
ACCESS_TYPES = (
('read', 'Read',),
('write', 'Write'),
('admin', 'Admin'),
)
SITE_STATUS = (
('active', 'Active'),
('disabled', 'Disabled'),
)
class Membership(models.Model):
user = models.ForeignKey(User)
access = models.CharField(max_length=10, choices=ACCESS_TYPES)
class GeoKitSite(TenantMixin):
RESERVED = [
'test', 'geokit', 'admin', 'public', 'topology', 'geometry', 'data',
'raster', 'template', 'schema_template'
]
user = models.ForeignKey(User)
name = models.CharField(max_length=100, null=True)
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=15, choices=SITE_STATUS, default='active')
auto_create_schema = False
@classmethod
def is_allowed(cls, name):
m = re.match(r'^[a-z0-9]+$', name)
if m is None:
return False
return name not in cls.RESERVED
@classmethod
def is_available(cls, name):
return cls.is_allowed(name) and \
not cls.objects.filter(schema_name=name).exists()
def __unicode__(self):
return '%s - %s' % (self.user, self.name)
| gpl-2.0 | 8,578,746,440,716,939,000 | 24.333333 | 83 | 0.630848 | false | 3.553247 | false | false | false |
simvisage/oricreate | apps/sandbox/rch/ex005_triang2.py | 1 | 6880 | r'''
Calculate the derivatives of a dihedral angle.
'''
import numpy as np
from oricreate.api import CreasePatternState, CustomCPFactory
from oricreate.util.einsum_utils import \
DELTA, EPS
z_e = 0.5
def create_cp_factory():
cp = CreasePatternState(X=[[0, 0, 0],
[1, 1, 0],
[1, 0, 0],
[2, 0, 0]],
# L=[[0, 1],
# [1, 2],
# [2, 0],
# [1, 3],
# [3, 2]],
L=[[0, 2],
[1, 2],
[1, 0],
[2, 3],
[3, 1]],
F=[[0, 1, 2],
[1, 3, 2]]
)
cp_factory = CustomCPFactory(formed_object=cp)
return cp_factory
if __name__ == '__main__':
# end_doc
cp_factory = create_cp_factory()
cp = cp_factory.formed_object
vl = cp.iL_vectors
nl0, nl1 = np.einsum('fi...->if...', cp.iL_F_normals)
print('vl', vl.shape)
print(vl)
print('nl0', nl0.shape)
print(nl0)
print('nl1', nl1.shape)
print(nl1)
norm_vl = np.sqrt(np.einsum('...i,...i->...', vl, vl))
norm_nl0 = np.sqrt(np.einsum('...i,...i->...', nl0, nl0))
norm_nl1 = np.sqrt(np.einsum('...i,...i->...', nl1, nl1))
unit_vl = vl / norm_vl[:, np.newaxis]
unit_nl0 = nl0 / norm_nl0[:, np.newaxis]
unit_nl1 = nl1 / norm_nl1[:, np.newaxis]
print('unit_vl', unit_vl.shape)
print(unit_vl)
print('unit_nl0', unit_nl0.shape)
print(unit_nl0)
print('unit_nl1', unit_nl1.shape)
print(unit_nl1)
Tl0 = np.einsum('ij...->ji...',
np.array(
[unit_vl,
unit_nl0,
np.einsum('...j,...k,...ijk->...i',
unit_vl, unit_nl0, EPS)]
))
print('Tl0', Tl0.shape)
print(Tl0)
unit_nl01 = np.einsum('...ij,...j->...i', Tl0, unit_nl1)
print('unit_nl01[:,2]', unit_nl01[:, 2])
print(unit_nl01[:, 2])
psi = np.arcsin(unit_nl01[:, 2])
print('psi', psi)
print('L_vectors', cp.L_vectors.shape)
print(cp.L_vectors[1])
print('L_vectors_du', cp.L_vectors_dul.shape)
print(cp.L_vectors_dul[1])
print('iL_within_F0')
print(cp.iL_within_F0)
print('F_L_vectors_dul', cp.F_L_vectors_dul.shape)
print(cp.F_L_vectors_dul)
vl_dul = cp.iL_vectors_dul
nl0_dul0, nl1_dul1 = np.einsum('fi...->if...', cp.iL_F_normals_du)
print(cp.iL_N.shape)
print('vl_dul', vl_dul.shape)
print(vl_dul)
print('nl0_dul0', nl0_dul0.shape)
print(nl0_dul0)
print('nl1_dul1', nl1_dul1.shape)
print(nl1_dul1)
unit_nl0_dul0 = 1 / norm_nl0[:, np.newaxis, np.newaxis, np.newaxis] * (
nl0_dul0 -
np.einsum('...j,...i,...iNd->...jNd', unit_nl0, unit_nl0, nl0_dul0)
)
unit_nl1_dul1 = 1 / norm_nl1[:, np.newaxis, np.newaxis, np.newaxis] * (
nl1_dul1 -
np.einsum('...j,...i,...iNd->...jNd', unit_nl1, unit_nl1, nl1_dul1)
)
unit_vl_dul = 1 / norm_vl[:, np.newaxis, np.newaxis, np.newaxis] * (
vl_dul -
np.einsum('...j,...i,...iNd->...jNd', unit_vl, unit_vl, vl_dul)
)
print('unit_nl0_dul0', unit_nl0_dul0.shape)
print(unit_nl0_dul0)
print('unit_nl1_dul1', unit_nl1_dul1.shape)
print(unit_nl1_dul1)
print('unit_vl_dul', unit_vl_dul.shape)
print(unit_vl_dul)
Tl0_dul0 = np.einsum('ij...->ji...',
np.array([np.zeros_like(unit_nl0_dul0),
unit_nl0_dul0,
np.einsum(
'...j,...kNd,...ijk->...iNd',
unit_vl, unit_nl0_dul0, EPS)
]
))
print('Tl0_dul0', Tl0_dul0.shape)
print(Tl0_dul0)
Tl0_dul = np.einsum('ij...->ji...',
np.array([unit_vl_dul,
np.zeros_like(unit_vl_dul),
np.einsum(
'...jNd,...k,...ijk->...iNd',
unit_vl_dul, unit_nl0, EPS)
]
)
)
print('Tl0_dul0', Tl0_dul.shape)
print(Tl0_dul)
rho = 1 / np.sqrt((1 - unit_nl01[:, 2]**2))
print('rho', unit_nl01[:, 2])
unit_nl01_dul = np.einsum(
'...,...j,...ijNd->...iNd', rho, unit_nl1, Tl0_dul)[:, 2, ...]
unit_nl01_dul0 = np.einsum(
'...,...j,...ijNd->...iNd', rho, unit_nl1, Tl0_dul0)[:, 2, ...]
unit_nl01_dul1 = np.einsum(
'...,...jNd,...ij->...iNd', rho, unit_nl1_dul1, Tl0)[:, 2, ...]
print('unit_nl01_dul', unit_nl01_dul.shape)
print(unit_nl01_dul)
print('unit_nl01_dul0', unit_nl01_dul0.shape)
print(unit_nl01_dul0)
print('unit_nl01_dul1', unit_nl01_dul1.shape)
print(unit_nl01_dul1)
# get the map of facet nodes attached to interior lines
iL0_N_map = cp.F_N[cp.iL_F[:, 0]].reshape(cp.n_iL, -1)
iL1_N_map = cp.F_N[cp.iL_F[:, 1]].reshape(cp.n_iL, -1)
#iL_N_map = cp.iL_N
iL_N_map = cp.F_L_N[cp.iL_within_F0]
print('iL_N_map', iL_N_map.shape)
print(iL_N_map)
# enumerate the interior lines and broadcast it N and D into dimensions
iL_map = np.arange(cp.n_iL)[:, np.newaxis, np.newaxis]
# broadcast the facet node map into D dimension
l0_map = iL0_N_map[:, :, np.newaxis]
l1_map = iL1_N_map[:, :, np.newaxis]
l_map = iL_N_map[:, :, np.newaxis]
# broadcast the spatial dimension map into iL and N dimensions
D_map = np.arange(3)[np.newaxis, np.newaxis, :]
# allocate the gamma derivatives of iL with respect to N and D dimensions
psi_du = np.zeros((cp.n_iL, cp.n_N, cp.n_D), dtype='float_')
# add the contributions gamma_du from the left and right facet
# Note: this cannot be done in a single step since the incremental
# assembly is not possible within a single index expression.
psi_du[iL_map, l_map, D_map] += unit_nl01_dul
print('l_map', l_map.shape)
print(l_map)
print('psi_du', psi_du.shape)
print(psi_du)
psi_du[iL_map, l0_map, D_map] += unit_nl01_dul0
print('l0_map', l0_map.shape)
print(l0_map)
print('psi_du', psi_du.shape)
print(psi_du)
psi_du[iL_map, l1_map, D_map] += unit_nl01_dul1
print('l1_map', l1_map.shape)
print(l1_map)
print('psi_du', psi_du.shape)
print(psi_du)
| gpl-3.0 | 5,509,409,985,829,546,000 | 31.91866 | 77 | 0.467151 | false | 2.877457 | false | false | false |
namiszh/fba | Toolkit/yahoo_import_pre_draft_ranks.py | 1 | 7692 | #!/usr/bin/python
from pandas import DataFrame
from selenium import webdriver
from selenium.webdriver import PhantomJS
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import click
import pandas as pd
import re
import time
@click.command()
@click.option('--u', prompt='Your Yahoo username', help='Your Yahoo account username')
@click.option('--p', hide_input=True, prompt='Your Yahoo password', help='Your Yahoo account password')
@click.option('--l', type=int, prompt='Your yahoo league id', help='Your yahoo league id')
@click.option('--f', prompt='your player value csv file', help='your player value csv file')
@click.option('--n', type=int, default=300, prompt='The number of players you\'d like to rank', help='top number players you\' like to rank')
@click.option('--h', type=bool, default=True, prompt='Do you want to run in headless mode? [True|False]', help='If True you won\'t see what\'s going on while it\'s running. If false you will see the browser render the steps.')
def import_player_ranks(u, p, l, f, n, h):
"""Given a csv file that has player values, Set pre draft player values for a yahoo fantasy basketball league."""
# read player values from csv file.
print('reading player ranks from csv file...')
df = pd.read_csv(f, encoding = "ISO-8859-1")
player_list = []
names = df[df.columns[0]].tolist()
for name in names:
name = name.replace(".", "") # C.J. McCollum -> CJ McCollum
name = name.replace(",", "") # Dennis Smith, Jr.
match = re.search(r'^(\S+\s\S+)(\s\S+)*$', name) # Larry Nance Jr. -> Larry Nance, Glen Robinson III -> Glen Robinson
name = match.group(1)
# print(name)
player_list.append(name)
# get selenium web driver
if h:
DesiredCapabilities.PHANTOMJS['phantomjs.page.settings.userAgent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0) Gecko/20121026 Firefox/16.0'
driver = webdriver.PhantomJS()
else:
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--dns-prefetch-disable")
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_window_size(1920, 1080)
driver.maximize_window()
# login into yahoo
print('login into yahoo as {}'.format(u))
driver.get('https://login.yahoo.com/?.src=fantasy&specId=usernameRegWithName&.intl=us&.lang=en-US&authMechanism=primary&yid=&done=https%3A%2F%2Fbasketball.fantasysports.yahoo.com%2Fnba%2F%3Futmpdku%3D1&eid=100&add=1')
delay = 8 # seconds
WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, 'login-username'))).send_keys(u)
driver.find_element_by_id('login-signin').click()
WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, 'login-passwd'))).send_keys(p)
driver.find_element_by_id('login-signin').click()
# make sure the 'My Teams and Leagues' Table is loaded
WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, "gamehome-teams")))
# find all leagues and teams
print('find all leagues and teams')
userTeamElements = driver.find_elements_by_xpath("//div[@class='Grid-table']//a[@class='Block Fz-sm Phone-fz-xs Pbot-xs']")
# get the url of pre draft value rank for this league
team_url = None
for teamElement in userTeamElements:
user_team_url = teamElement.get_attribute("href")
match = re.search(r'/nba/(\d+)/(\d+)/?$', user_team_url)
match_league_id = int(match.group(1))
if match_league_id == l:
team_url = user_team_url
break;
if team_url is None:
print('cannot find league id={}'.format( l))
return
# there are usually about 600 players, we set count to 800 to make all players can display in one page
pre_draft_value_url = team_url + '/editprerank?count=800'
print('set pre draft values in {}'.format(pre_draft_value_url))
driver.get(pre_draft_value_url)
# first click 'Start Over'
print('click "Start Over"')
WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.XPATH, '//button[contains(@class, "reset-roster-btn")]'))).click()
alert = driver.switch_to.alert
alert.accept()
# save result
WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, 'submit-editprerank'))).click()
time.sleep(2)
# then click 'load all' to load all pages
# click save would reset the status(count =800), so loading all players explicitly again.
driver.get(pre_draft_value_url)
print('Load all players')
loadAllEle = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.LINK_TEXT, 'Load More Players')))
hov = ActionChains(driver).move_to_element(loadAllEle)
hov.perform()
time.sleep(2)
ActionChains(driver).move_to_element(loadAllEle).click(loadAllEle).perform()
time.sleep(5)
playerElements = driver.find_elements_by_xpath('//ul[@id="all_player_list"]//li//span//div[contains(@class, "playersimple-adddrop")]//span[@class="Bfc"]//span[2]')
plusElements = driver.find_elements_by_xpath('//ul[@id="all_player_list"]//li//span//div[contains(@class, "playersimple-adddrop")]//div[@class="Fl-end"]//span[2]')
print('There are {} players in the table.'.format(len(playerElements)))
name_to_ele_map = {}
for plyaerEle, plusEle in zip(playerElements, plusElements):
player_name = plyaerEle.text.replace(".", "") # C.J. McCollum -> CJ McCollum
player_name = player_name.replace(",", "") # Dennis Smith, Jr.
match = re.search(r'^(\S+\s\S+)(\s\S+)*$', player_name) # Larry Nance Jr. -> Larry Nance, Glen Robinson III -> Glen Robinson
player_name = match.group(1)
# print(player_name)
name_to_ele_map[player_name] = plusEle
print('Set player ranks...')
for i, player_name in enumerate(player_list, start = 1):
# just need to rank the top n players
if i>n:
break
# special cases
if player_name not in name_to_ele_map:
if player_name == 'Guillermo Hernangomez':
player_name = 'Willy Hernangomez'
elif player_name == 'Juan Hernangomez':
player_name = 'Juancho Hernangomez'
elif player_name == 'Moe Harkless':
player_name = 'Maurice Harkless'
if player_name not in name_to_ele_map:
if i == 1:
print('***** Cannot find player {} in the table, please check the name and add it to the top manually *****'.format(player_name))
else:
print('***** Cannot find player {} in the table, please check the name and add it to the #{} position, just after {} *****'.format(player_name, i, player_list[i-2]))
continue
webEle = name_to_ele_map[player_name]
hov = ActionChains(driver).move_to_element(webEle)
hov.perform()
# time.sleep(2)
ActionChains(driver).move_to_element(webEle).click(webEle).perform()
# save result
WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, 'submit-editprerank'))).click()
time.sleep(2)
# show result
print('show result')
driver.get(team_url + '/prerank')
time.sleep(60)
driver.quit()
if __name__ == '__main__':
import_player_ranks()
| mit | 2,991,500,479,918,677,500 | 46.190184 | 226 | 0.655486 | false | 3.354557 | false | false | false |
ooici/marine-integrations | mi/dataset/parser/dosta_abcdjm_mmp_cds.py | 1 | 3302 | #!/usr/bin/env python
"""
@package mi.dataset.parser.dosta_abcdjm_mmp_cds
@file marine-integrations/mi/dataset/parser/dosta_abcdjm_mmp_cds.py
@author Mark Worden
@brief Parser for the DostaAbcdjmMmpCds dataset driver
Release notes:
initial release
"""
__author__ = 'Mark Worden'
__license__ = 'Apache 2.0'
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.dataset.parser.mmp_cds_base import MmpCdsParserDataParticle, MmpCdsParser
class DataParticleType(BaseEnum):
INSTRUMENT = 'dosta_abcdjm_mmp_cds_instrument'
class DostaAbcdjmMmpCdsParserDataParticleKey(BaseEnum):
CALIBRATED_PHASE = 'calibrated_phase'
OPTODE_TEMPERATURE = 'optode_temperature'
class DostaAbcdjmMmpCdsParserDataParticle(MmpCdsParserDataParticle):
"""
Class for parsing data from the DostaAbcdjmMmpCds data set
"""
_data_particle_type = DataParticleType.INSTRUMENT
def _get_mmp_cds_subclass_particle_params(self, dict_data):
"""
This method is required to be implemented by classes that extend the MmpCdsParserDataParticle class.
This implementation returns the particle parameters specific for DostaAbcdjmMmpCds. As noted in the
base, it is okay to allow the following exceptions to propagate: ValueError, TypeError, IndexError, KeyError.
@returns a list of particle params specific to DostaAbcdjmMmpCds
"""
calibrated_phase = self._encode_value(DostaAbcdjmMmpCdsParserDataParticleKey.CALIBRATED_PHASE,
dict_data['doconcs'], float)
optode_temperature = self._encode_value(DostaAbcdjmMmpCdsParserDataParticleKey.OPTODE_TEMPERATURE,
dict_data['t'], float)
subclass_particle_params = [calibrated_phase, optode_temperature]
return subclass_particle_params
class DostaAbcdjmMmpCdsParser(MmpCdsParser):
"""
Class for parsing data obtain from a DOSTA sensor, series A, B, C, D, J and M, as received from a McLane Moored
Profiler connected to the cabled docking station.
"""
def __init__(self,
config,
state,
stream_handle,
state_callback,
publish_callback,
*args, **kwargs):
"""
This method is a constructor that will instantiate a DostaAbcdjmMmpCdsParser object.
@param config The configuration for this MmpCdsParser parser
@param state The state the DostaAbcdjmMmpCdsParser should use to initialize itself
@param stream_handle The handle to the data stream containing the MmpCds data
@param state_callback The function to call upon detecting state changes
@param publish_callback The function to call to provide particles
"""
# Call the superclass constructor
super(DostaAbcdjmMmpCdsParser, self).__init__(config,
state,
stream_handle,
state_callback,
publish_callback,
*args, **kwargs) | bsd-2-clause | 7,201,116,402,367,147,000 | 37.406977 | 117 | 0.633858 | false | 3.978313 | false | false | false |
TraurigeNarr/ThirdParties | assimp-3.2/port/PyAssimp/gen/structsgen.py | 44 | 9543 | #!/usr/bin/env python
# -*- Coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# Open Asset Import Library (ASSIMP)
# ---------------------------------------------------------------------------
#
# Copyright (c) 2006-2010, ASSIMP Development Team
#
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# * Neither the name of the ASSIMP team, nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior
# written permission of the ASSIMP Development Team.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---------------------------------------------------------------------------
"""Update PyAssimp's data structures to keep up with the
C/C++ headers.
This script is meant to be executed in the source tree, directly from
port/PyAssimp/gen
"""
import os
import re
#==[regexps]=================================================
# Clean desc
REdefine = re.compile(r''
r'(?P<desc>)' # /** *desc */
r'#\s*define\s(?P<name>[^(\n]+?)\s(?P<code>.+)$' # #define name value
, re.MULTILINE)
# Get structs
REstructs = re.compile(r''
#r'//\s?[\-]*\s(?P<desc>.*?)\*/\s' # /** *desc */
#r'//\s?[\-]*(?P<desc>.*?)\*/(?:.*?)' # garbage
r'//\s?[\-]*\s(?P<desc>.*?)\*/\W*?' # /** *desc */
r'struct\s(?:ASSIMP_API\s)?(?P<name>[a-z][a-z0-9_]\w+\b)' # struct name
r'[^{]*?\{' # {
r'(?P<code>.*?)' # code
r'\}\s*(PACK_STRUCT)?;' # };
, re.IGNORECASE + re.DOTALL + re.MULTILINE)
# Clean desc
REdesc = re.compile(r''
r'^\s*?([*]|/\*\*)(?P<line>.*?)' # * line
, re.IGNORECASE + re.DOTALL + re.MULTILINE)
# Remove #ifdef __cplusplus
RErmifdef = re.compile(r''
r'#ifdef __cplusplus' # #ifdef __cplusplus
r'(?P<code>.*)' # code
r'#endif(\s*//\s*!?\s*__cplusplus)*' # #endif
, re.IGNORECASE + re.DOTALL)
# Replace comments
RErpcom = re.compile(r''
r'\s*(/\*+\s|\*+/|\B\*\s|///?!?)' # /**
r'(?P<line>.*?)' # * line
, re.IGNORECASE + re.DOTALL)
# Restructure
def GetType(type, prefix='c_'):
t = type
while t.endswith('*'):
t = t[:-1]
if t[:5] == 'const':
t = t[5:]
# skip some types
if t in skiplist:
return None
t = t.strip()
types = {'unsigned int':'uint', 'unsigned char':'ubyte',}
if t in types:
t = types[t]
t = prefix + t
while type.endswith('*'):
t = "POINTER(" + t + ")"
type = type[:-1]
return t
def restructure( match ):
type = match.group("type")
if match.group("struct") == "":
type = GetType(type)
elif match.group("struct") == "C_ENUM ":
type = "c_uint"
else:
type = GetType(type[2:], '')
if type is None:
return ''
if match.group("index"):
type = type + "*" + match.group("index")
result = ""
for name in match.group("name").split(','):
result += "(\"" + name.strip() + "\", "+ type + "),"
return result
RErestruc = re.compile(r''
r'(?P<struct>C_STRUCT\s|C_ENUM\s|)' # [C_STRUCT]
r'(?P<type>\w+\s?\w+?[*]*)\s' # type
#r'(?P<name>\w+)' # name
r'(?P<name>\w+|[a-z0-9_, ]+)' # name
r'(:?\[(?P<index>\w+)\])?;' # []; (optional)
, re.DOTALL)
#==[template]================================================
template = """
class $NAME$(Structure):
\"\"\"
$DESCRIPTION$
\"\"\"
$DEFINES$
_fields_ = [
$FIELDS$
]
"""
templateSR = """
class $NAME$(Structure):
\"\"\"
$DESCRIPTION$
\"\"\"
$DEFINES$
$NAME$._fields_ = [
$FIELDS$
]
"""
skiplist = ("FileIO", "File", "locateFromAssimpHeap",'LogStream','MeshAnim','AnimMesh')
#============================================================
def Structify(fileName):
file = open(fileName, 'r')
text = file.read()
result = []
# Get defines.
defs = REdefine.findall(text)
# Create defines
defines = "\n"
for define in defs:
# Clean desc
desc = REdesc.sub('', define[0])
# Replace comments
desc = RErpcom.sub('#\g<line>', desc)
defines += desc
if len(define[2].strip()):
# skip non-integral defines, we can support them right now
try:
int(define[2],0)
except:
continue
defines += " "*4 + define[1] + " = " + define[2] + "\n"
# Get structs
rs = REstructs.finditer(text)
fileName = os.path.basename(fileName)
print fileName
for r in rs:
name = r.group('name')[2:]
desc = r.group('desc')
# Skip some structs
if name in skiplist:
continue
text = r.group('code')
# Clean desc
desc = REdesc.sub('', desc)
desc = "See '"+ fileName +"' for details." #TODO
# Remove #ifdef __cplusplus
text = RErmifdef.sub('', text)
# Whether the struct contains more than just POD
primitive = text.find('C_STRUCT') == -1
# Restructure
text = RErestruc.sub(restructure, text)
# Replace comments
text = RErpcom.sub('# \g<line>', text)
text = text.replace("),#", "),\n#")
text = text.replace("#", "\n#")
text = "".join([l for l in text.splitlines(True) if not l.strip().endswith("#")]) # remove empty comment lines
# Whether it's selfreferencing: ex. struct Node { Node* parent; };
selfreferencing = text.find('POINTER('+name+')') != -1
complex = name == "Scene"
# Create description
description = ""
for line in desc.split('\n'):
description += " "*4 + line.strip() + "\n"
description = description.rstrip()
# Create fields
fields = ""
for line in text.split('\n'):
fields += " "*12 + line.strip() + "\n"
fields = fields.strip()
if selfreferencing:
templ = templateSR
else:
templ = template
# Put it all together
text = templ.replace('$NAME$', name)
text = text.replace('$DESCRIPTION$', description)
text = text.replace('$FIELDS$', fields)
if ((name.lower() == fileName.split('.')[0][2:].lower()) and (name != 'Material')) or name == "String":
text = text.replace('$DEFINES$', defines)
else:
text = text.replace('$DEFINES$', '')
result.append((primitive, selfreferencing, complex, text))
return result
text = "#-*- coding: UTF-8 -*-\n\n"
text += "from ctypes import POINTER, c_int, c_uint, c_size_t, c_char, c_float, Structure, c_char_p, c_double, c_ubyte\n\n"
structs1 = ""
structs2 = ""
structs3 = ""
structs4 = ""
path = '../../../include/assimp'
files = os.listdir (path)
#files = ["aiScene.h", "aiTypes.h"]
for fileName in files:
if fileName.endswith('.h'):
for struct in Structify(os.path.join(path, fileName)):
primitive, sr, complex, struct = struct
if primitive:
structs1 += struct
elif sr:
structs2 += struct
elif complex:
structs4 += struct
else:
structs3 += struct
text += structs1 + structs2 + structs3 + structs4
file = open('structs.py', 'w')
file.write(text)
file.close()
print("Generation done. You can now review the file 'structs.py' and merge it.")
| gpl-2.0 | -8,412,571,633,829,338,000 | 31.906897 | 122 | 0.488421 | false | 3.933636 | false | false | false |
jakobzhao/wbcrawler3 | generate_sematic_network.py | 1 | 1468 | # !/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on Oct 16, 2015
# @author: Bo Zhao
# @email: [email protected]
# @website: http://yenching.org
# @organization: Harvard Kennedy School
from wbcrawler.sna import generate_sematic_network
# generate_sematic_network(keywords=[u'社保', u'社会保险'], depth=[10, 10, 10], w2v_file=u'insurance/w2v.bin', gexf_file=u"insurance/社保.gexf")
# generate_sematic_network(keywords=[u'延退', u'延迟', u'65'], depth=[10, 10, 10], w2v_file=u'insurance/w2v.bin', gexf_file=u"insurance/延迟.gexf") # u'社保亏空'
# generate_sematic_network(keywords=[u'公积金'], depth=[10, 10, 10], w2v_file=u'insurance/w2v.bin', gexf_file=u"insurance/公积金.gexf")
# generate_sematic_network(keywords=[u'报销'], depth=[10, 10, 10], w2v_file=u'insurance/w2v.bin', gexf_file=u"insurance/报销.gexf")
# generate_sematic_network(keywords=[u'二孩', u'二胎'], depth=[10, 10, 10], w2v_file=u'insurance/w2v.bin', gexf_file=u"five/二孩.gexf")
# generate_sematic_network(keywords=[u'二孩', u'二胎'], depth=[10, 10, 10], w2v_file=u'insurance/w2v.bin', gexf_file=u"five/二孩.gexf")
# generate_sematic_network(keywords=[u'商业保险'], depth=[10, 10, 10], w2v_file=u'insurance/w2v.bin', gexf_file=u"insurance/商险.gexf")
# generate_sematic_network(keywords=[u'亏空', u'缺口', u'财政补贴'], depth=[10, 10, 1], w2v_file=u'insurance/w2v.bin', gexf_file=u"insurance/社保亏空2.gexf")
| mit | -343,822,011,936,169,540 | 66.6 | 152 | 0.686391 | false | 2.011905 | false | true | false |
Rademade/taiga-back | taiga/events/apps.py | 3 | 1618 | # Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from django.apps import AppConfig
from django.db.models import signals
def connect_events_signals():
from . import signal_handlers as handlers
signals.post_save.connect(handlers.on_save_any_model, dispatch_uid="events_change")
signals.post_delete.connect(handlers.on_delete_any_model, dispatch_uid="events_delete")
def disconnect_events_signals():
from . import signal_handlers as handlers
signals.post_save.disconnect(dispatch_uid="events_change")
signals.post_delete.disconnect(dispatch_uid="events_delete")
class EventsAppConfig(AppConfig):
name = "taiga.events"
verbose_name = "Events App Config"
def ready(self):
connect_events_signals()
| agpl-3.0 | 4,559,636,659,529,954,300 | 38.414634 | 91 | 0.755569 | false | 3.607143 | false | false | false |
radical-cybertools/ExTASY | examples/coam-on-archer/kernel_defs/tleap.py | 1 | 4163 | #!/usr/bin/env python
"""A kernel that creates a new ASCII file with a given size and name.
"""
__author__ = "The ExTASY project <[email protected]>"
__copyright__ = "Copyright 2015, http://www.extasy-project.org/"
__license__ = "MIT"
from copy import deepcopy
from radical.ensemblemd.exceptions import ArgumentError
from radical.ensemblemd.exceptions import NoKernelConfigurationError
from radical.ensemblemd.engine import get_engine
from radical.ensemblemd.kernel_plugins.kernel_base import KernelBase
# ------------------------------------------------------------------------------
_KERNEL_INFO = {
"name": "custom.tleap",
"description": "Creates a new file of given size and fills it with random ASCII characters.",
"arguments": {
"--numofsims=":
{
"mandatory": True,
"description": "No. of frontpoints = No. of simulation CUs"
},
"--cycle=":
{
"mandatory": True,
"description": "Output filename for postexec"
}
},
"machine_configs":
{
"*": {
"environment" : {"FOO": "bar"},
"pre_exec" : [],
"executable" : "python",
"uses_mpi" : False
},
"xsede.stampede":
{
"environment" : {},
"pre_exec" : [
"module load TACC",
"module load intel/13.0.2.146",
"module load python/2.7.9",
"module load netcdf/4.3.2",
"module load hdf5/1.8.13",
"export AMBERHOME=/opt/apps/intel13/mvapich2_1_9/amber/12.0",
"export PYTHONPATH=//work/02998/ardi/coco_installation/lib/python2.7/site-packages:$PYTHONPATH",
"export PATH=/work/02998/ardi/coco_installation/bin:$AMBERHOME/bin:$PATH"],
"executable" : ["python"],
"uses_mpi" : False
},
"epsrc.archer":
{
"environment" : {},
"pre_exec" : [
"module load python-compute/2.7.6",
"module load pc-numpy",
"module load pc-scipy",
"module load pc-coco",
"module load pc-netcdf4-python",
"module load amber"],
"executable" : ["python"],
"uses_mpi" : False
},
}
}
# ------------------------------------------------------------------------------
#
class kernel_tleap(KernelBase):
def __init__(self):
super(kernel_tleap, self).__init__(_KERNEL_INFO)
"""Le constructor."""
# --------------------------------------------------------------------------
#
@staticmethod
def get_name():
return _KERNEL_INFO["name"]
def _bind_to_resource(self, resource_key):
"""(PRIVATE) Implements parent class method.
"""
if resource_key not in _KERNEL_INFO["machine_configs"]:
if "*" in _KERNEL_INFO["machine_configs"]:
# Fall-back to generic resource key
resource_key = "*"
else:
raise NoKernelConfigurationError(kernel_name=_KERNEL_INFO["name"], resource_key=resource_key)
cfg = _KERNEL_INFO["machine_configs"][resource_key]
executable = cfg["executable"]
arguments = ['postexec.py','{0}'.format(self.get_arg("--numofsims=")),'{0}'.format(self.get_arg("--cycle="))]
self._executable = executable
self._arguments = arguments
self._environment = cfg["environment"]
self._uses_mpi = cfg["uses_mpi"]
self._pre_exec = cfg["pre_exec"]
self._post_exec = None
# ------------------------------------------------------------------------------
| mit | 5,601,111,011,044,209,000 | 34.581197 | 124 | 0.444871 | false | 4.49568 | true | false | false |
soarpenguin/ansible | lib/ansible/modules/storage/purestorage/purefa_hg.py | 7 | 5209 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley ([email protected])
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: purefa_hg
version_added: "2.4"
short_description: Create, Delete and Modify hostgroups on Pure Storage FlashArray
description:
- This module creates, deletes or modifies hostgroups on Pure Storage FlashArray.
author: Simon Dodsley (@simondodsley)
options:
hostgroup:
description:
- Host Name.
required: true
state:
description:
- Creates or modifies hostgroup.
required: false
default: present
choices: [ "present", "absent" ]
host:
description:
- List of existing hosts to add to hostgroup.
required: false
volume:
description:
- List of existing volumes to add to hostgroup.
required: false
extends_documentation_fragment:
- purestorage
'''
EXAMPLES = '''
- name: Create new hostgroup
purefa_hg:
hostgroup: foo
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Delete hostgroup - this will disconnect all hosts and volume in the hostgroup
purefa_hg:
hostgroup: foo
state: absent
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Create host group with hosts and volumes
purefa_hg:
hostgroup: bar
host:
- host1
- host2
volume:
- vol1
- vol2
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
HAS_PURESTORAGE = True
try:
from purestorage import purestorage
except ImportError:
HAS_PURESTORAGE = False
def get_hostgroup(module, array):
hostgroup = None
for h in array.list_hgroups():
if h["name"] == module.params['hostgroup']:
hostgroup = h
break
return hostgroup
def make_hostgroup(module, array):
changed = True
if not module.check_mode:
host = array.create_hgroup(module.params['hostgroup'])
if module.params['host']:
array.set_hgroup(module.params['hostgroup'], hostlist=module.params['host'])
if module.params['volume']:
for v in module.params['volume']:
array.connect_hgroup(module.params['hostgroup'], v)
module.exit_json(changed=changed)
def update_hostgroup(module, array):
changed = False
hostgroup = module.params['hostgroup']
module.exit_json(changed=changed)
def delete_hostgroup(module, array):
changed = True
if not module.check_mode:
for vol in array.list_hgroup_connections(module.params['hostgroup']):
array.disconnect_hgroup(module.params['hostgroup'], vol["vol"])
host = array.get_hgroup(module.params['hostgroup'])
array.set_hgroup(module.params['hostgroup'], remhostlist=host['hosts'])
array.delete_hgroup(module.params['hostgroup'])
module.exit_json(changed=changed)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(
dict(
hostgroup=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
host=dict(type='list'),
volume=dict(type='list'),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_PURESTORAGE:
module.fail_json(msg='purestorage sdk is required for this module in host')
state = module.params['state']
array = get_system(module)
hostgroup = get_hostgroup(module, array)
if module.params['host']:
try:
for h in module.params['host']:
array.get_host(h)
except:
module.fail_json(msg='Host not found')
if module.params['volume']:
try:
for v in module.params['volume']:
array.get_volume(v)
except:
module.fail_json(msg='Volume not found')
if hostgroup and state == 'present':
update_hostgroup(module, array)
elif hostgroup and state == 'absent':
delete_hostgroup(module, array)
elif hostgroup is None and state == 'absent':
module.exit_json(changed=False)
else:
make_hostgroup(module, array)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,106,317,457,866,757,000 | 26.560847 | 88 | 0.650989 | false | 3.728704 | false | false | false |
kav2k/AoC | 2015/03/solver.py | 1 | 2120 | """AoC 2015.03 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
def chars(file, chunkSize=4096):
"""
Take a file object, read it in chuncks and iterate over it one character at a time.
Keyword arguments:
file --- a file object to iterate over
chunkSize --- buffer size for file reads (default=4096)
"""
chunk = file.read(chunkSize)
while chunk:
for char in chunk:
yield char
chunk = file.read(chunkSize)
def move(position, instruction):
"""
Take a position and offset it based on instuction, or raise an error on invalid instruction.
Keyword arguments:
position --- current position as a tuple (x,y)
Instruction --- single-character instruction to move in ["^", "v", ">", "<"]
"""
if instruction == "^":
return (position[0], position[1] + 1)
elif instruction == "v":
return (position[0], position[1] - 1)
elif instruction == ">":
return (position[0] + 1, position[1])
elif instruction == "<":
return (position[0] - 1, position[1])
else:
raise ValueError("Instruction '{}' not recognized".format(instruction))
def solver(file):
"""
Take a file object with input and solve AoC 2015.03 problem on the input.
Keyword arguments:
file --- a file object to read input from
"""
alone = set([(0, 0)])
alone_position = (0, 0)
together = set([(0, 0)])
santa_position = (0, 0)
robot_position = (0, 0)
robot_turn = False
for instruction in chars(file):
alone_position = move(alone_position, instruction)
alone.add(alone_position)
if robot_turn:
robot_position = move(robot_position, instruction)
together.add(robot_position)
else:
santa_position = move(santa_position, instruction)
together.add(santa_position)
robot_turn = not robot_turn
return (len(alone), len(together))
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: Santa alone will deliver presents to {} houses.".format(solution[0]))
print("Part B: Santa and Robo-Santa will deliver presents to {} houses.".format(solution[1]))
| mit | 162,078,325,013,108,350 | 25.5 | 95 | 0.654245 | false | 3.617747 | false | false | false |
pizzathief/scipy | scipy/signal/tests/test_spectral.py | 9 | 53294 | import numpy as np
from numpy.testing import (assert_, assert_approx_equal,
assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal_nulp, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy import signal
from scipy.fft import fftfreq
from scipy.signal import (periodogram, welch, lombscargle, csd, coherence,
spectrogram, stft, istft, check_COLA, check_NOLA)
from scipy.signal.spectral import _spectral_helper
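# Tests for scipy.signal.periodogram: impulse inputs with known flat spectra,
# checked across dtypes, one-/two-sided output, scalings, axes, external
# windows and nfft handling.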
class TestPeriodogram(object):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
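        # a unit impulse has a flat spectrum; the default constant detrend
        # zeroes the DC bin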
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_odd(self):
x = np.zeros(15)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 1/16.0)
q[0] = 0
assert_allclose(p, q)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, scaling='spectrum')
g, q = periodogram(x, scaling='density')
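        # with fs=1 and a length-16 boxcar window, 'spectrum' scaling comes
        # out to 'density' scaling divided by 16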
assert_allclose(f, np.linspace(0, 0.5, 9))
assert_allclose(p, q/16.0)
def test_integer_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_integer_odd(self):
x = np.zeros(15, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 1/16.0)
q[0] = 0
assert_allclose(p, q)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
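        # |1 + 2j|**2 == 5, spread over all 16 two-sided bins; the DC bin is
        # removed by the default constant detrend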
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 5.0/16.0)
q[0] = 0
assert_allclose(p, q)
def test_unk_scaling(self):
assert_raises(ValueError, periodogram, np.zeros(4, np.complex128),
scaling='foo')
def test_nd_axis_m1(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((2,1,10))
x[:,:,0] = 1.0
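        # each row holds the same impulse, so the per-row PSDs must agree and
        # match the 1-D computation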
f, p = periodogram(x)
assert_array_equal(p.shape, (2, 1, 6))
assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60)
f0, p0 = periodogram(x[0,0,:])
assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60)
def test_nd_axis_0(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((10,2,1))
x[0,:,:] = 1.0
f, p = periodogram(x, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60)
f0, p0 = periodogram(x[:,0,0])
assert_array_almost_equal_nulp(p0, p[:,1,0])
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, 10, 'hann')
win = signal.get_window('hann', 16)
fe, pe = periodogram(x, 10, win)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, periodogram, x,
10, win_err) # win longer than signal
def test_padded_fft(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
fp, pp = periodogram(x, nfft=32)
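        # zero-padding to nfft=32 doubles the frequency resolution; the
        # unpadded bins reappear at every other index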
assert_allclose(f, fp[::2])
assert_allclose(p, pp[::2])
assert_array_equal(pp.shape, (17,))
def test_empty_input(self):
f, p = periodogram([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_nfft(self):
x = np.zeros(18)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_nfft_is_xshape(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9, 'f')
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(15, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8, 'f')
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 1/16.0, 'f')
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 5.0/16.0, 'f')
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
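# Tests for scipy.signal.welch: averaged periodograms of impulse inputs with
# hard-coded expected values, plus detrending, axis, window, noverlap and
# nperseg handling.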
class TestWelch(object):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
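        # two unit impulses analysed with Hann-windowed, half-overlapping
        # segments of length 8; q holds the expected averaged PSD values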
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, welch, np.zeros(4, np.complex128),
scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
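        # a pure linear ramp is removed entirely by linear detrending, so the
        # resulting PSD should be ~0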
f, p = welch(x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = welch(x, nperseg=10, detrend=False)
f2, p2 = welch(x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = welch(x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = welch(x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = welch(x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, 10, 'hann', nperseg=8)
win = signal.get_window('hann', 8)
fe, pe = welch(x, 10, win, nperseg=None)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
assert_array_equal(fe.shape, (5,)) # because win length used as nperseg
assert_array_equal(pe.shape, (5,))
assert_raises(ValueError, welch, x,
10, win, nperseg=4) # because nperseg != win.shape[-1]
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, welch, x,
10, win_err, nperseg=None) # win longer than signal
def test_empty_input(self):
f, p = welch([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = welch(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = welch(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
#for string-like window, input signal length < nperseg value gives
#UserWarning, sets nperseg to x.shape[-1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8")
f, p = welch(x,window='hann') # default nperseg
f1, p1 = welch(x,window='hann', nperseg=256) # user-specified nperseg
f2, p2 = welch(x, nperseg=8) # valid nperseg, doesn't give warning
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f2)
assert_allclose(p1, p2)
def test_window_long_or_nd(self):
assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1]))
assert_raises(ValueError, welch, np.zeros(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = welch(x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
0.17072113], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111,
0.07638889], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype,
'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def test_padded_freqs(self):
x = np.zeros(12)
nfft = 24
f = fftfreq(nfft, 1.0)[:nfft//2+1]
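        # fftfreq labels the Nyquist bin as -0.5, while the one-sided output
        # reports it as +0.5, so flip the sign of the last element.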
f[-1] *= -1
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
nfft = 25
f = fftfreq(nfft, 1.0)[:(nfft + 1)//2]
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
def test_window_correction(self):
A = 20
fs = 1e4
nperseg = int(fs//10)
fsig = 300
ii = int(fsig*nperseg//fs) # Freq index of fsig
tt = np.arange(fs)/fs
x = A*np.sin(2*np.pi*fsig*tt)
for window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']:
_, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window,
scaling='spectrum')
freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window,
scaling='density')
# Check peak height at signal frequency for 'spectrum'
assert_allclose(p_spec[ii], A**2/2.0)
# Check integrated spectrum RMS for 'density'
assert_allclose(np.sqrt(np.trapz(p_dens, freq)), A*np.sqrt(2)/2,
rtol=1e-3)
def test_axis_rolling(self):
np.random.seed(1234)
x_flat = np.random.randn(1024)
_, p_flat = welch(x_flat)
for a in range(3):
newshape = [1,]*3
newshape[a] = -1
x = x_flat.reshape(newshape)
_, p_plus = welch(x, axis=a) # Positive axis index
_, p_minus = welch(x, axis=a-x.ndim) # Negative axis index
assert_equal(p_flat, p_plus.squeeze(), err_msg=a)
assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim)
def test_average(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, average='median')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([.1, .05, 0., 1.54074396e-33, 0.])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_raises(ValueError, welch, x, nperseg=8,
average='unrecognised-average')
class TestCSD:
def test_pad_shorter_x(self):
x = np.zeros(8)
y = np.zeros(12)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_pad_shorter_y(self):
x = np.zeros(12)
y = np.zeros(8)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, csd, np.zeros(4, np.complex128),
np.ones(4, np.complex128), scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = csd(x, x, nperseg=10, detrend=False)
f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = csd(x, x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = csd(x, x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = csd(x, x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, 10, 'hann', 8)
win = signal.get_window('hann', 8)
fe, pe = csd(x, x, 10, win, nperseg=None)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
assert_array_equal(fe.shape, (5,)) # because win length used as nperseg
assert_array_equal(pe.shape, (5,))
assert_raises(ValueError, csd, x, x,
10, win, nperseg=256) # because nperseg != win.shape[-1]
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, csd, x, x,
10, win_err, nperseg=None) # because win longer than signal
def test_empty_input(self):
f, p = csd([],np.zeros(10))
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
f, p = csd(np.zeros(10),[])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = csd(np.empty(shape), np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
f, p = csd(np.ones(10), np.empty((5,0)))
assert_array_equal(f.shape, (5,0))
assert_array_equal(p.shape, (5,0))
f, p = csd(np.empty((5,0)), np.ones(10))
assert_array_equal(f.shape, (5,0))
assert_array_equal(p.shape, (5,0))
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = csd(np.empty(shape), np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1)
assert_array_equal(f.shape, (10,0,3))
assert_array_equal(p.shape, (10,0,3))
f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1)
assert_array_equal(f.shape, (10,0,3))
assert_array_equal(p.shape, (10,0,3))
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
#for string-like window, input signal length < nperseg value gives
#UserWarning, sets nperseg to x.shape[-1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8")
f, p = csd(x, x, window='hann') # default nperseg
f1, p1 = csd(x, x, window='hann', nperseg=256) # user-specified nperseg
f2, p2 = csd(x, x, nperseg=8) # valid nperseg, doesn't give warning
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f2)
assert_allclose(p1, p2)
def test_window_long_or_nd(self):
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
np.array([1,1,1,1,1]))
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = csd(x, x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hann',
2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3,
nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
0.17072113], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111,
0.07638889], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype,
'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def test_padded_freqs(self):
x = np.zeros(12)
y = np.ones(12)
nfft = 24
f = fftfreq(nfft, 1.0)[:nfft//2+1]
f[-1] *= -1
fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
feven, _ = csd(x, y, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
nfft = 25
f = fftfreq(nfft, 1.0)[:(nfft + 1)//2]
fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
feven, _ = csd(x, y, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
class TestCoherence(object):
def test_identical_input(self):
x = np.random.randn(20)
y = np.copy(x) # So `y is x` -> False
f = np.linspace(0, 0.5, 6)
C = np.ones(6)
f1, C1 = coherence(x, y, nperseg=10)
assert_allclose(f, f1)
assert_allclose(C, C1)
def test_phase_shifted_input(self):
x = np.random.randn(20)
y = -x
f = np.linspace(0, 0.5, 6)
C = np.ones(6)
f1, C1 = coherence(x, y, nperseg=10)
assert_allclose(f, f1)
assert_allclose(C, C1)
class TestSpectrogram(object):
def test_average_all_segments(self):
x = np.random.randn(1024)
fs = 1.0
window = ('tukey', 0.25)
nperseg = 16
noverlap = 2
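        # Averaging the spectrogram over its time axis should reproduce Welch's
        # estimate, since Welch is the mean over the same segments.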
f, _, P = spectrogram(x, fs, window, nperseg, noverlap)
fw, Pw = welch(x, fs, window, nperseg, noverlap)
assert_allclose(f, fw)
assert_allclose(np.mean(P, axis=-1), Pw)
def test_window_external(self):
x = np.random.randn(1024)
fs = 1.0
window = ('tukey', 0.25)
nperseg = 16
noverlap = 2
f, _, P = spectrogram(x, fs, window, nperseg, noverlap)
win = signal.get_window(('tukey', 0.25), 16)
fe, _, Pe = spectrogram(x, fs, win, nperseg=None, noverlap=2)
assert_array_equal(fe.shape, (9,)) # because win length used as nperseg
assert_array_equal(Pe.shape, (9,73))
assert_raises(ValueError, spectrogram, x,
fs, win, nperseg=8) # because nperseg != win.shape[-1]
win_err = signal.get_window(('tukey', 0.25), 2048)
assert_raises(ValueError, spectrogram, x,
fs, win_err, nperseg=None) # win longer than signal
def test_short_data(self):
x = np.random.randn(1024)
fs = 1.0
#for string-like window, input signal length < nperseg value gives
#UserWarning, sets nperseg to x.shape[-1]
f, _, p = spectrogram(x, fs, window=('tukey',0.25)) # default nperseg
with suppress_warnings() as sup:
sup.filter(UserWarning,
"nperseg = 1025 is greater than input length = 1024, using nperseg = 1024")
f1, _, p1 = spectrogram(x, fs, window=('tukey',0.25),
nperseg=1025) # user-specified nperseg
f2, _, p2 = spectrogram(x, fs, nperseg=256) # to compare w/default
f3, _, p3 = spectrogram(x, fs, nperseg=1024) # compare w/user-spec'd
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f3)
assert_allclose(p1, p3)
class TestLombscargle(object):
def test_frequency(self):
"""Test if frequency location of peak corresponds to frequency of
generated input signal.
"""
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
P = lombscargle(t, x, f)
# Check if difference between found frequency maximum and input
# frequency is less than accuracy
delta = f[1] - f[0]
assert_(w - f[np.argmax(P)] < (delta/2.))
def test_amplitude(self):
# Test if height of peak in normalized Lomb-Scargle periodogram
# corresponds to amplitude of the generated input signal.
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f)
# Normalize
pgram = np.sqrt(4 * pgram / t.shape[0])
        # Check if the height of the peak in the normalized periodogram
        # matches the amplitude of the input signal
assert_approx_equal(np.max(pgram), ampl, significant=2)
def test_precenter(self):
# Test if precenter gives the same result as manually precentering.
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
offset = 0.15 # Offset to be subtracted in pre-centering
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi) + offset
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f, precenter=True)
pgram2 = lombscargle(t, x - x.mean(), f, precenter=False)
# check if centering worked
assert_allclose(pgram, pgram2)
def test_normalize(self):
        # Test normalize option of Lomb-Scargle.
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f)
pgram2 = lombscargle(t, x, f, normalize=True)
# check if normalization works as expected
assert_allclose(pgram * 2 / np.dot(x, x), pgram2)
assert_approx_equal(np.max(pgram2), 1.0, significant=2)
def test_wrong_shape(self):
t = np.linspace(0, 1, 1)
x = np.linspace(0, 1, 2)
f = np.linspace(0, 1, 3)
assert_raises(ValueError, lombscargle, t, x, f)
def test_zero_division(self):
t = np.zeros(1)
x = np.zeros(1)
f = np.zeros(1)
assert_raises(ZeroDivisionError, lombscargle, t, x, f)
def test_lombscargle_atan_vs_atan2(self):
# https://github.com/scipy/scipy/issues/3787
# This raised a ZeroDivisionError.
t = np.linspace(0, 10, 1000, endpoint=False)
x = np.sin(4*t)
f = np.linspace(0, 50, 500, endpoint=False) + 0.1
lombscargle(t, x, f*2*np.pi)
class TestSTFT(object):
def test_input_validation(self):
assert_raises(ValueError, check_COLA, 'hann', -10, 0)
assert_raises(ValueError, check_COLA, 'hann', 10, 20)
assert_raises(ValueError, check_COLA, np.ones((2,2)), 10, 0)
assert_raises(ValueError, check_COLA, np.ones(20), 10, 0)
assert_raises(ValueError, check_NOLA, 'hann', -10, 0)
assert_raises(ValueError, check_NOLA, 'hann', 10, 20)
assert_raises(ValueError, check_NOLA, np.ones((2,2)), 10, 0)
assert_raises(ValueError, check_NOLA, np.ones(20), 10, 0)
assert_raises(ValueError, check_NOLA, 'hann', 64, -32)
x = np.zeros(1024)
z = np.array(stft(x), dtype=object)
assert_raises(ValueError, stft, x, window=np.ones((2,2)))
assert_raises(ValueError, stft, x, window=np.ones(10), nperseg=256)
assert_raises(ValueError, stft, x, nperseg=-256)
assert_raises(ValueError, stft, x, nperseg=256, noverlap=1024)
assert_raises(ValueError, stft, x, nperseg=256, nfft=8)
assert_raises(ValueError, istft, x) # Not 2d
assert_raises(ValueError, istft, z, window=np.ones((2,2)))
assert_raises(ValueError, istft, z, window=np.ones(10), nperseg=256)
assert_raises(ValueError, istft, z, nperseg=-256)
assert_raises(ValueError, istft, z, nperseg=256, noverlap=1024)
assert_raises(ValueError, istft, z, nperseg=256, nfft=8)
assert_raises(ValueError, istft, z, nperseg=256, noverlap=0,
window='hann') # Doesn't meet COLA
assert_raises(ValueError, istft, z, time_axis=0, freq_axis=0)
assert_raises(ValueError, _spectral_helper, x, x, mode='foo')
assert_raises(ValueError, _spectral_helper, x[:512], x[512:],
mode='stft')
assert_raises(ValueError, _spectral_helper, x, x, boundary='foo')
def test_check_COLA(self):
settings = [
('boxcar', 10, 0),
('boxcar', 10, 9),
('bartlett', 51, 26),
('hann', 256, 128),
('hann', 256, 192),
('blackman', 300, 200),
(('tukey', 0.5), 256, 64),
('hann', 256, 255),
]
for setting in settings:
msg = '{0}, {1}, {2}'.format(*setting)
assert_equal(True, check_COLA(*setting), err_msg=msg)
def test_check_NOLA(self):
settings_pass = [
('boxcar', 10, 0),
('boxcar', 10, 9),
('boxcar', 10, 7),
('bartlett', 51, 26),
('bartlett', 51, 10),
('hann', 256, 128),
('hann', 256, 192),
('hann', 256, 37),
('blackman', 300, 200),
('blackman', 300, 123),
(('tukey', 0.5), 256, 64),
(('tukey', 0.5), 256, 38),
('hann', 256, 255),
('hann', 256, 39),
]
for setting in settings_pass:
msg = '{0}, {1}, {2}'.format(*setting)
assert_equal(True, check_NOLA(*setting), err_msg=msg)
w_fail = np.ones(16)
w_fail[::2] = 0
settings_fail = [
(w_fail, len(w_fail), len(w_fail) // 2),
('hann', 64, 0),
]
for setting in settings_fail:
msg = '{0}, {1}, {2}'.format(*setting)
assert_equal(False, check_NOLA(*setting), err_msg=msg)
def test_average_all_segments(self):
np.random.seed(1234)
x = np.random.randn(1024)
fs = 1.0
window = 'hann'
nperseg = 16
noverlap = 8
# Compare twosided, because onesided welch doubles non-DC terms to
# account for power at negative frequencies. stft doesn't do this,
# because it breaks invertibility.
f, _, Z = stft(x, fs, window, nperseg, noverlap, padded=False,
return_onesided=False, boundary=None)
fw, Pw = welch(x, fs, window, nperseg, noverlap, return_onesided=False,
scaling='spectrum', detrend=False)
assert_allclose(f, fw)
assert_allclose(np.mean(np.abs(Z)**2, axis=-1), Pw)
def test_permute_axes(self):
np.random.seed(1234)
x = np.random.randn(1024)
fs = 1.0
window = 'hann'
nperseg = 16
noverlap = 8
f1, t1, Z1 = stft(x, fs, window, nperseg, noverlap)
f2, t2, Z2 = stft(x.reshape((-1, 1, 1)), fs, window, nperseg, noverlap,
axis=0)
t3, x1 = istft(Z1, fs, window, nperseg, noverlap)
t4, x2 = istft(Z2.T, fs, window, nperseg, noverlap, time_axis=0,
freq_axis=-1)
assert_allclose(f1, f2)
assert_allclose(t1, t2)
assert_allclose(t3, t4)
assert_allclose(Z1, Z2[:, 0, 0, :])
assert_allclose(x1, x2[:, 0, 0])
def test_roundtrip_real(self):
np.random.seed(1234)
settings = [
('boxcar', 100, 10, 0), # Test no overlap
('boxcar', 100, 10, 9), # Test high overlap
('bartlett', 101, 51, 26), # Test odd nperseg
('hann', 1024, 256, 128), # Test defaults
(('tukey', 0.5), 1152, 256, 64), # Test Tukey
('hann', 1024, 256, 255), # Test overlapped hann
]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=False)
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window)
msg = '{0}, {1}'.format(window, noverlap)
assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg)
def test_roundtrip_not_nola(self):
np.random.seed(1234)
w_fail = np.ones(16)
w_fail[::2] = 0
settings = [
(w_fail, 256, len(w_fail), len(w_fail) // 2),
('hann', 256, 64, 0),
]
for window, N, nperseg, noverlap in settings:
msg = '{0}, {1}, {2}, {3}'.format(window, N, nperseg, noverlap)
assert not check_NOLA(window, nperseg, noverlap), msg
t = np.arange(N)
x = 10 * np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=True,
boundary='zeros')
with pytest.warns(UserWarning, match='NOLA'):
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window, boundary=True)
assert np.allclose(t, tr[:len(t)]), msg
assert not np.allclose(x, xr[:len(x)]), msg
def test_roundtrip_nola_not_cola(self):
np.random.seed(1234)
settings = [
('boxcar', 100, 10, 3), # NOLA True, COLA False
('bartlett', 101, 51, 37), # NOLA True, COLA False
('hann', 1024, 256, 127), # NOLA True, COLA False
(('tukey', 0.5), 1152, 256, 14), # NOLA True, COLA False
('hann', 1024, 256, 5), # NOLA True, COLA False
]
for window, N, nperseg, noverlap in settings:
msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
assert check_NOLA(window, nperseg, noverlap), msg
assert not check_COLA(window, nperseg, noverlap), msg
t = np.arange(N)
x = 10 * np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=True,
boundary='zeros')
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window, boundary=True)
msg = '{0}, {1}'.format(window, noverlap)
assert_allclose(t, tr[:len(t)], err_msg=msg)
assert_allclose(x, xr[:len(x)], err_msg=msg)
def test_roundtrip_float32(self):
np.random.seed(1234)
settings = [('hann', 1024, 256, 128)]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
x = x.astype(np.float32)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=False)
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window)
msg = '{0}, {1}'.format(window, noverlap)
            assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg, rtol=1e-4, atol=1e-5)
assert_(x.dtype == xr.dtype)
def test_roundtrip_complex(self):
np.random.seed(1234)
settings = [
('boxcar', 100, 10, 0), # Test no overlap
('boxcar', 100, 10, 9), # Test high overlap
('bartlett', 101, 51, 26), # Test odd nperseg
('hann', 1024, 256, 128), # Test defaults
(('tukey', 0.5), 1152, 256, 64), # Test Tukey
('hann', 1024, 256, 255), # Test overlapped hann
]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size) + 10j*np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=False,
return_onesided=False)
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window, input_onesided=False)
msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg)
# Check that asking for onesided switches to twosided
with suppress_warnings() as sup:
sup.filter(UserWarning,
"Input data is complex, switching to return_onesided=False")
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=False,
return_onesided=True)
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window, input_onesided=False)
msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg)
def test_roundtrip_boundary_extension(self):
np.random.seed(1234)
# Test against boxcar, since window is all ones, and thus can be fully
# recovered with no boundary extension
settings = [
('boxcar', 100, 10, 0), # Test no overlap
('boxcar', 100, 10, 9), # Test high overlap
]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=True,
boundary=None)
_, xr = istft(zz, noverlap=noverlap, window=window, boundary=False)
for boundary in ['even', 'odd', 'constant', 'zeros']:
_, _, zz_ext = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=True,
boundary=boundary)
_, xr_ext = istft(zz_ext, noverlap=noverlap, window=window,
boundary=True)
msg = '{0}, {1}, {2}'.format(window, noverlap, boundary)
assert_allclose(x, xr, err_msg=msg)
assert_allclose(x, xr_ext, err_msg=msg)
def test_roundtrip_padded_signal(self):
np.random.seed(1234)
settings = [
('boxcar', 101, 10, 0),
('hann', 1000, 256, 128),
]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=True)
tr, xr = istft(zz, noverlap=noverlap, window=window)
msg = '{0}, {1}'.format(window, noverlap)
# Account for possible zero-padding at the end
assert_allclose(t, tr[:t.size], err_msg=msg)
assert_allclose(x, xr[:x.size], err_msg=msg)
def test_roundtrip_padded_FFT(self):
np.random.seed(1234)
settings = [
('hann', 1024, 256, 128, 512),
('hann', 1024, 256, 128, 501),
('boxcar', 100, 10, 0, 33),
(('tukey', 0.5), 1152, 256, 64, 1024),
]
for window, N, nperseg, noverlap, nfft in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
xc = x*np.exp(1j*np.pi/4)
# real signal
_, _, z = stft(x, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
window=window, detrend=None, padded=True)
# complex signal
_, _, zc = stft(xc, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
window=window, detrend=None, padded=True,
return_onesided=False)
tr, xr = istft(z, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
window=window)
tr, xcr = istft(zc, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
window=window, input_onesided=False)
msg = '{0}, {1}'.format(window, noverlap)
assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg)
assert_allclose(xc, xcr, err_msg=msg)
def test_axis_rolling(self):
np.random.seed(1234)
x_flat = np.random.randn(1024)
_, _, z_flat = stft(x_flat)
for a in range(3):
newshape = [1,]*3
newshape[a] = -1
x = x_flat.reshape(newshape)
_, _, z_plus = stft(x, axis=a) # Positive axis index
_, _, z_minus = stft(x, axis=a-x.ndim) # Negative axis index
assert_equal(z_flat, z_plus.squeeze(), err_msg=a)
assert_equal(z_flat, z_minus.squeeze(), err_msg=a-x.ndim)
# z_flat has shape [n_freq, n_time]
# Test vs. transpose
_, x_transpose_m = istft(z_flat.T, time_axis=-2, freq_axis=-1)
_, x_transpose_p = istft(z_flat.T, time_axis=0, freq_axis=1)
assert_allclose(x_flat, x_transpose_m, err_msg='istft transpose minus')
assert_allclose(x_flat, x_transpose_p, err_msg='istft transpose plus')
| bsd-3-clause | 3,733,224,388,705,972,000 | 35.477755 | 105 | 0.519346 | false | 2.962424 | true | false | false |
eustislab/horton | data/examples/ap1rog/h2_cholesky_aug-cc-pvdz.py | 1 | 3689 | #!/usr/bin/env python
from horton import *
import numpy as np
###############################################################################
## Set up molecule, define basis set ##########################################
###############################################################################
# get the XYZ file from HORTON's test data directory
fn_xyz = context.get_fn('test/h2.xyz')
mol = IOData.from_file(fn_xyz)
obasis = get_gobasis(mol.coordinates, mol.numbers, 'aug-cc-pVDZ')
###############################################################################
## Define Occupation model, expansion coefficients and overlap ################
###############################################################################
lf = CholeskyLinalgFactory(obasis.nbasis)
occ_model = AufbauOccModel(1)
orb = lf.create_expansion(obasis.nbasis)
olp = obasis.compute_overlap(lf)
###############################################################################
## Construct Hamiltonian ######################################################
###############################################################################
kin = obasis.compute_kinetic(lf)
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf)
er = obasis.compute_electron_repulsion(lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RExchangeTerm(er, 'x_hf'),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
###############################################################################
## Perform initial guess ######################################################
###############################################################################
guess_core_hamiltonian(olp, kin, na, orb)
###############################################################################
## Do a Hartree-Fock calculation ##############################################
###############################################################################
scf_solver = PlainSCFSolver(1e-6)
scf_solver(ham, lf, olp, occ_model, orb)
###############################################################################
## Combine one-electron integrals to single Hamiltonian #######################
###############################################################################
one = kin.copy()
one.iadd(na)
###############################################################################
## Do OO-AP1roG optimization ##################################################
###############################################################################
ap1rog = RAp1rog(lf, occ_model)
energy, c, l = ap1rog(one, er, external['nn'], orb, olp, True, **{
'indextrans': 'tensordot',
'warning': False,
'checkpoint': 1,
'levelshift': 1e-8,
'absolute': False,
'givensrot': np.array([[]]),
'swapa': np.array([[]]),
'sort': True,
'guess': {'type': 'random', 'factor': -0.1, 'geminal': None, 'lagrange': None},
'solver': {'wfn': 'krylov', 'lagrange': 'krylov'},
'maxiter': {'wfniter': 200, 'orbiter': 100},
'dumpci': {'amplitudestofile': False, 'amplitudesfilename': './ap1rog_amplitudes.dat'},
'thresh': {'wfn': 1e-12, 'energy': 1e-8, 'gradientnorm': 1e-4, 'gradientmax': 5e-5},
'printoptions': {'geminal': True, 'ci': 0.01, 'excitationlevel': 1},
'stepsearch': {'method': 'trust-region', 'alpha': 1.0, 'c1': 0.0001, 'minalpha': 1e-6, 'maxiterouter': 10, 'maxiterinner': 500, 'maxeta': 0.75, 'mineta': 0.25, 'upscale': 2.0, 'downscale': 0.25, 'trustradius': 0.75, 'maxtrustradius': 0.75, 'threshold': 1e-8, 'optimizer': 'ddl'},
'orbitaloptimizer': 'variational'
}
)
| gpl-3.0 | -2,095,701,366,258,300,700 | 50.957746 | 283 | 0.412307 | false | 4.140292 | false | true | false |
futurecoin1/futurecoin1 | contrib/linearize/linearize-hashes.py | 105 | 2762 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
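# The configuration file parsed in main() is a plain key=value file; an example
# using the keys this script understands (values here are only illustrative):
#
#   host=127.0.0.1
#   port=15715
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=319000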
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 15715
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| mit | 7,122,426,798,255,612,000 | 25.056604 | 78 | 0.661839 | false | 2.957173 | false | false | false |
sanaldavis/Google-Python-Exercies | basic/mimic.py | 1 | 2511 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
"""Returns mimic dict mapping each word to list of words which follow it."""
# +++your code here+++
  # Read the whole file once and split on whitespace to get every word in order.
  words = open(filename).read().split()
  mimic = {}
  prev = ''
  for word in words:
    if prev not in mimic:
      mimic[prev] = [word]
    else:
      mimic[prev].append(word)
    prev = word
  return mimic
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
# +++your code here+++
for i in range(200):
print word,
nexts=mimic_dict.get(word)
if not nexts:
nexts=mimic_dict['']
word=random.choice(nexts)
# Provided main(), calls mimic_dict() and mimic()
def main():
if len(sys.argv) != 2:
print 'usage: ./mimic.py file-to-read'
sys.exit(1)
dict = mimic_dict(sys.argv[1])
print dict
print_mimic(dict, '')
if __name__ == '__main__':
main()
| apache-2.0 | -6,352,672,385,858,135,000 | 28.197674 | 78 | 0.709677 | false | 3.444444 | false | false | false |
UstadMobile/exelearning-ustadmobile-work | exe/webui/readabilitypresetspage.py | 1 | 3166 | '''
Created on Sep 30, 2014
@author: mike
'''
import logging
import json
from twisted.web.resource import Resource
from exe.webui.renderable import RenderableResource
from exe.engine.readabilityutil import ReadabilityUtil
from exe.engine.path import Path
import os.path
from exe import globals as G
import re
import uuid
class ReadabilityPresetsPage(RenderableResource):
'''
    This page serves AJAX requests about readability presets
'''
name = 'readabilitypresets'
def __init__(self, parent):
'''
Constructor
'''
RenderableResource.__init__(self, parent)
def render_POST(self, request):
action = None
result = {}
if 'action' in request.args:
action = request.args['action'][0]
if action == "savepreset":
json_str = request.args['presetval'][0]
json_obj = json.loads(json_str, "utf-8")
json_obj = ReadabilityUtil().save_readability_preset(json_obj)
result = {
"success" : True,
"uuid" : json_obj['uuid']
}
return json.dumps(result)
def _check_is_clean_filename(self, filename):
"""Check a filename is only letters, numbers and dots - no slashes etc"""
filename_clean = re.sub("[^a-z0-9A-Z\\-\\.]+", "", filename)
if filename_clean != filename:
raise ValueError("Invalid chars in filename")
def render_GET(self, request):
action = None
if 'action' in request.args:
action = request.args['action'][0]
#result that will be put into json and sent back
result = {}
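        # Dispatch on the requested action; anything unrecognised falls through to
        # listing the presets available for the requested file extension.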
if action == "list_params_by_lang":
result = self.list_params_by_lang(request)
elif action == "list_presets":
result = ReadabilityUtil().list_readability_preset_ids("erp2")
elif action == "get_preset_by_id":
preset_id = request.args['presetid'][0]
result = ReadabilityUtil().get_readability_preset_by_id(preset_id)
elif action == "delete_preset_by_id":
preset_id = request.args['presetid'][0]
self._check_is_clean_filename(preset_id)
result = ReadabilityUtil().delete_readability_preset_by_id(preset_id)
else:
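            # The requested extension is restricted to lowercase letters and digits
            # before it is used to look up presets.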
extension_req = request.args['type'][0]
extension_clean = re.sub("[^a-z0-9]+", "", extension_req)
if extension_clean != extension_req:
raise ValueError("Can only allow letters and numbers in extension")
readability_presets = ReadabilityUtil().list_readability_presets(extension_req)
result['rootList'] = []
for preset_name in readability_presets:
result['rootList'].append({"filename" : preset_name, "basename" : preset_name})
return json.dumps(result)
def list_params_by_lang(self, request):
lang_code = request.args['lang'][0]
return ReadabilityUtil.get_params_by_lang(lang_code)
| gpl-2.0 | 1,791,234,677,561,414,400 | 33.423913 | 95 | 0.570752 | false | 4.106355 | false | false | false |
annarev/tensorflow | tensorflow/python/keras/utils/version_utils.py | 3 | 5247 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities for Keras classes with v1 and v2 versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils.generic_utils import LazyLoader
# TODO(b/134426265): Switch back to single-quotes once the issue
# with copybara is fixed.
# pylint: disable=g-inconsistent-quotes
training = LazyLoader(
"training", globals(),
"tensorflow.python.keras.engine.training")
training_v1 = LazyLoader(
"training_v1", globals(),
"tensorflow.python.keras.engine.training_v1")
base_layer = LazyLoader(
"base_layer", globals(),
"tensorflow.python.keras.engine.base_layer")
base_layer_v1 = LazyLoader(
"base_layer_v1", globals(),
"tensorflow.python.keras.engine.base_layer_v1")
callbacks = LazyLoader(
"callbacks", globals(),
"tensorflow.python.keras.callbacks")
callbacks_v1 = LazyLoader(
"callbacks_v1", globals(),
"tensorflow.python.keras.callbacks_v1")
# pylint: enable=g-inconsistent-quotes
class ModelVersionSelector(object):
"""Chooses between Keras v1 and v2 Model class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
use_v2 = should_use_v2()
cls = swap_class(cls, training.Model, training_v1.Model, use_v2) # pylint: disable=self-cls-assignment
return super(ModelVersionSelector, cls).__new__(cls)
class LayerVersionSelector(object):
"""Chooses between Keras v1 and v2 Layer class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
use_v2 = should_use_v2()
cls = swap_class(cls, base_layer.Layer, base_layer_v1.Layer, use_v2) # pylint: disable=self-cls-assignment
return super(LayerVersionSelector, cls).__new__(cls)
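# The selectors above are meant to be mixed into the public Keras classes, e.g. a
# simplified, illustrative definition (not a verbatim copy of the real one):
#
#   class Layer(..., LayerVersionSelector):
#     ...
#
# so that instantiating a Layer/Model/TensorBoard callback resolves to the v1 or v2
# implementation chosen in __new__ according to the execution mode.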
class TensorBoardVersionSelector(object):
"""Chooses between Keras v1 and v2 TensorBoard callback class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
use_v2 = should_use_v2()
start_cls = cls
cls = swap_class(start_cls, callbacks.TensorBoard, callbacks_v1.TensorBoard,
use_v2)
if start_cls == callbacks_v1.TensorBoard and cls == callbacks.TensorBoard:
# Since the v2 class is not a subclass of the v1 class, __init__ has to
# be called manually.
return cls(*args, **kwargs)
return super(TensorBoardVersionSelector, cls).__new__(cls)
def should_use_v2():
"""Determine if v1 or v2 version should be used."""
if context.executing_eagerly():
return True
elif ops.executing_eagerly_outside_functions():
# Check for a v1 `wrap_function` FuncGraph.
# Code inside a `wrap_function` is treated like v1 code.
graph = ops.get_default_graph()
if (getattr(graph, "name", False) and
graph.name.startswith("wrapped_function")):
return False
return True
else:
return False
def swap_class(cls, v2_cls, v1_cls, use_v2):
"""Swaps in v2_cls or v1_cls depending on graph mode."""
if cls == object:
return cls
if cls in (v2_cls, v1_cls):
return v2_cls if use_v2 else v1_cls
# Recursively search superclasses to swap in the right Keras class.
new_bases = []
for base in cls.__bases__:
if ((use_v2 and issubclass(base, v1_cls)
# `v1_cls` often extends `v2_cls`, so it may still call `swap_class`
# even if it doesn't need to. That being said, it may be the safest
# not to over optimize this logic for the sake of correctness,
# especially if we swap v1 & v2 classes that don't extend each other,
# or when the inheritance order is different.
or (not use_v2 and issubclass(base, v2_cls)))):
new_base = swap_class(base, v2_cls, v1_cls, use_v2)
else:
new_base = base
new_bases.append(new_base)
cls.__bases__ = tuple(new_bases)
return cls
def disallow_legacy_graph(cls_name, method_name):
if not ops.executing_eagerly_outside_functions():
error_msg = (
"Calling `{cls_name}.{method_name}` in graph mode is not supported "
"when the `{cls_name}` instance was constructed with eager mode "
"enabled. Please construct your `{cls_name}` instance in graph mode or"
" call `{cls_name}.{method_name}` with eager mode enabled.")
error_msg = error_msg.format(cls_name=cls_name, method_name=method_name)
raise ValueError(error_msg)
def is_v1_layer_or_model(obj):
return isinstance(obj, (base_layer_v1.Layer, training_v1.Model))
| apache-2.0 | 3,143,262,255,125,004,300 | 37.29927 | 111 | 0.682295 | false | 3.574251 | false | false | false |
mondalaci/twitter-backup.py | twitter-backup.py | 1 | 5347 | #!/usr/bin/env python
import os
import os.path
from sys import exit, argv
import json
import oauth2 as oauth
from StringIO import StringIO
from urlparse import parse_qsl
# Please don't use this key and secret if you create a new version of this script.
# You can request your own API key at https://dev.twitter.com/apps/new
# (If you fork my repo to merely submit a pull request then you don't need to change this.)
consumer_key = 'I5Qy02p5CrIXw8Sa9ohw'
consumer_secret = 'ubG7dkIS6g2cjYshXM6gtN6dSZEekKTRZMKgjYIv4'
max_tweets_per_request = 200
access_token_filepath = '~/.config/twitter-backup.py/access-token.json'
def get_access_token_from_twitter():
# Taken from https://github.com/simplegeo/python-oauth2#twitter-three-legged-oauth-example
request_token_url = 'https://api.twitter.com/oauth/request_token'
access_token_url = 'https://api.twitter.com/oauth/access_token'
authorize_url = 'https://api.twitter.com/oauth/authorize'
client = oauth.Client(consumer)
# Step 1: Get a request token. This is a temporary token that is used for
# having the user authorize an access token and to sign the request to obtain
# said access token.
resp, content = client.request(request_token_url, "GET")
if resp['status'] != '200':
raise Exception("Invalid response %s." % resp['status'])
request_token = dict(parse_qsl(content))
# Step 2: Redirect to the provider. Since this is a CLI script we do not
# redirect. In a web application you would redirect the user to the URL
# below.
print "Visit %s?oauth_token=%s" % (authorize_url, request_token['oauth_token'])
# After the user has granted access to you, the consumer, the provider will
# redirect you to whatever URL you have told them to redirect to. You can
# usually define this in the oauth_callback argument as well.
oauth_verifier = raw_input('What is the PIN? ')
# Step 3: Once the consumer has redirected the user back to the oauth_callback
# URL you can request the access token the user has approved. You use the
# request token to sign this request. After this is done you throw away the
# request token and use the access token returned. You should store this
# access token somewhere safe, like a database, for future use.
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(oauth_verifier)
client = oauth.Client(consumer, token)
resp, content = client.request(access_token_url, "POST")
access_token = dict(parse_qsl(content))
if access_token == {}:
print 'Invalid PIN was given'
exit(1)
return access_token
def fetch_tweets(access_token, screen_name, max_id=None):
token = oauth.Token(access_token['oauth_token'], access_token['oauth_token_secret'])
client = oauth.Client(consumer, token)
screen_name = '' if screen_name==None else '&screen_name='+screen_name
max_id = '' if max_id==None else '&max_id='+str(max_id)
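    # The max_id parameter (when present) limits the request to tweets with
    # id <= max_id, which is how the user_timeline endpoint is paged back in time.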
request_url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?count=%d%s%s' % \
(max_tweets_per_request, screen_name, max_id)
response = client.request(request_url)
response_headers, response_body = response
tweets = json.load(StringIO(response_body))
return tweets
def get_earliest_tweet_id(tweets):
id = None
for tweet in tweets:
id = tweet['id']
return id
def save_tweets(json_object, filepath):
json_string = json.dumps(json_object, indent=4)
with open(filepath, 'w') as file:
file.write(json_string)
def save_access_token(token):
token_directory = os.path.dirname(get_access_token_file_path())
if not os.path.exists(token_directory):
os.makedirs(token_directory)
dumped_token = json.dumps(token)
with open(get_access_token_file_path(), 'w') as file:
file.write(dumped_token)
def load_access_token():
try:
with open(get_access_token_file_path(), 'r') as file:
access_token = json.load(file)
return access_token
except IOError:
return None
def get_access_token_file_path():
return os.path.expanduser(access_token_filepath)
def print_help():
print 'Usage: %s [SCREEN-NAME] | -h | --help' % (argv[0])
print 'Fetch the tweets of SCREEN-NAME'
print 'SCREEN-NAME is optional and defaults to the sceen name of the authorizing user'
# Main program
if len(argv) >= 2:
if argv[1] in ['-h', '--help']:
print_help()
exit(0)
else:
screen_name = argv[1]
else:
screen_name = None
consumer = oauth.Consumer(consumer_key, consumer_secret)
access_token = load_access_token()
if access_token == None:
access_token = get_access_token_from_twitter()
save_access_token(access_token)
earliest_tweet_id = None
page_number = 1
tweet_index = 0
while True:
tweets = fetch_tweets(access_token, screen_name, earliest_tweet_id)
if len(tweets) > 0:
dest_filename = '%02d.json' % (page_number)
print 'Saving tweet %d to %d as %s' % (tweet_index, tweet_index+len(tweets), dest_filename)
save_tweets(tweets, dest_filename)
        # The user_timeline max_id parameter is inclusive, so step just below the
        # earliest tweet already saved to avoid fetching the same tweet again on
        # the next page.
        earliest_tweet_id = get_earliest_tweet_id(tweets) - 1
page_number += 1
tweet_index += len(tweets)
if len(tweets) < max_tweets_per_request:
break
| gpl-3.0 | -7,132,872,576,028,077,000 | 34.410596 | 99 | 0.680943 | false | 3.423175 | false | false | false |
SanPen/GridCal | src/GridCal/Engine/Simulations/NTC/net_transfer_capacity_driver.py | 1 | 15405 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import time
import json
import numpy as np
import numba as nb
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Core.snapshot_pf_data import compile_snapshot_circuit
from GridCal.Engine.Simulations.LinearFactors.linear_analysis import LinearAnalysis, make_worst_contingency_transfer_limits
from GridCal.Engine.Simulations.driver_types import SimulationTypes
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.Simulations.results_model import ResultsModel
from GridCal.Engine.Simulations.results_template import ResultsTemplate
from GridCal.Engine.Simulations.driver_template import DriverTemplate
########################################################################################################################
# Optimal Power flow classes
########################################################################################################################
@nb.njit()
def compute_ntc(ptdf, lodf, P0, flows, rates, idx1, idx2, threshold=0.02):
"""
Compute all lines' ATC
:param ptdf: Power transfer distribution factors (n-branch, n-bus)
:param lodf: Line outage distribution factors (n-branch, n-outage branch)
:param P0: all bus injections [p.u.]
:param flows: Line Sf [MW]
:param rates: all line rates vector
:param idx1: bus indices of the sending region
:param idx2: bus indices of the receiving region
:param threshold: value that determines if a line is studied for the ATC calculation
:return: ATC vector for all the lines
"""
nbr = ptdf.shape[0]
nbus = ptdf.shape[1]
# declare the bus injections increment due to the transference
dTi = np.zeros(nbus)
# set the sending power increment proportional to the current power
# dTi[idx1] = dT * (P0[idx1] / P0[idx1].sum())
dTi[idx1] = P0[idx1] / P0[idx1].sum()
# set the receiving power increment proportional to the current power
# dTi[idx2] = -dT * (P0[idx2] / P0[idx2].sum())
dTi[idx2] = -P0[idx2] / P0[idx2].sum()
# compute the line flow increments due to the exchange increment dT in MW
dFlow = ptdf.dot(dTi)
# this operation proves the same result: dFlow == dFlow2
# Pbr = np.dot(ptdf, (P0 * Sbase) + dTi)
# dFlow2 = Pbr - flows
# compute the sensitivities to the exchange
# alpha = dFlow / dT
alpha = dFlow / 1.0
# explore the ATC
atc_max = np.zeros(nbr)
atc_min = np.zeros(nbr)
worst_max = 0
worst_min = 0
worst_contingency_max = 0
worst_contingency_min = 0
PS_max = -1e20 # should increase
PS_min = 1e20 # should decrease
for m in range(nbr): # for each branch
if abs(alpha[m]) > threshold and abs(flows[m]) < rates[m]: # if the branch is relevant enough for the NTC...
# explore the ATC in "N-1"
for c in range(nbr): # for each contingency
if m != c:
# compute the OTDF
otdf = alpha[m] + lodf[m, c] * alpha[c]
# compute the contingency flow
contingency_flow = flows[m] + lodf[m, c] * flows[c]
if abs(otdf) > threshold:
# compute the branch+contingency ATC for each "sense" of the flow
alpha_ij = (rates[m] - contingency_flow) / otdf
beta_ij = (-rates[m] - contingency_flow) / otdf
#
alpha_p_ij = min(alpha_ij, beta_ij)
beta_p_ij = max(alpha_ij, beta_ij)
if alpha_p_ij > PS_max:
PS_max = alpha_p_ij
atc_max[m] = alpha_p_ij
worst_max = m
worst_contingency_max = c
if beta_p_ij < PS_min:
PS_min = beta_p_ij
                            atc_min[m] = beta_p_ij
worst_min = m
worst_contingency_min = c
if PS_min == 1e20:
PS_min = 0.0
if PS_max == -1e20:
PS_max = 0.0
return alpha, atc_max, atc_min, worst_max, worst_min, worst_contingency_max, worst_contingency_min, PS_max, PS_min
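# Illustrative call (a sketch mirroring how NetTransferCapacityDriver.run()
# invokes this function further below; `linear`, `nc`, `idx1b` and `idx2b` are
# assumed to be a LinearAnalysis, a compiled snapshot circuit and two bus index
# arrays respectively):
#
#   alpha, atc_max, atc_min, worst_max, worst_min, \
#       worst_contingency_max, worst_contingency_min, PS_max, PS_min = \
#       compute_ntc(ptdf=linear.PTDF, lodf=linear.LODF, P0=nc.Sbus.real,
#                   flows=linear.get_flows(nc.Sbus), rates=nc.ContingencyRates,
#                   idx1=idx1b, idx2=idx2b)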
class NetTransferCapacityResults(ResultsTemplate):
def __init__(self, n_br, n_bus, br_names, bus_names, bus_types, bus_idx_from, bus_idx_to):
"""
:param n_br:
:param n_bus:
:param br_names:
:param bus_names:
:param bus_types:
"""
ResultsTemplate.__init__(self,
name='ATC Results',
available_results=[ResultTypes.NetTransferCapacityPS,
ResultTypes.NetTransferCapacityAlpha,
ResultTypes.NetTransferCapacityReport
],
data_variables=['alpha',
'atc_max',
'atc_min',
'worst_max',
'worst_min',
'worst_contingency_max',
'worst_contingency_min',
'PS_max',
'PS_min',
'report',
'report_headers',
'report_indices',
'branch_names',
'bus_names',
'bus_types',
'bus_idx_from',
'bus_idx_to'])
self.n_br = n_br
self.n_bus = n_bus
self.branch_names = br_names
self.bus_names = bus_names
self.bus_types = bus_types
self.bus_idx_from = bus_idx_from
self.bus_idx_to = bus_idx_to
# stores the worst transfer capacities (from to) and (to from)
        self.alpha = np.zeros(self.n_br)
self.atc_max = np.zeros(self.n_br)
self.atc_min = np.zeros(self.n_br)
self.worst_max = 0
self.worst_min = 0
self.worst_contingency_max = 0
self.worst_contingency_min = 0
self.PS_max = 0.0
self.PS_min = 0.0
self.report = np.empty((1, 8), dtype=object)
self.report_headers = ['Branch min',
'Branch max',
'Worst Contingency min',
'Worst Contingency max',
'ATC max',
'ATC min',
'PS max',
'PS min']
self.report_indices = ['All']
def get_steps(self):
return
def make_report(self):
"""
:return:
"""
self.report = np.empty((1, 8), dtype=object)
self.report_headers = ['Branch min',
'Branch max',
'Worst Contingency min',
'Worst Contingency max',
'ATC max',
'ATC min',
'PS max',
'PS min']
self.report_indices = ['All']
self.report[0, 0] = self.branch_names[self.worst_max]
self.report[0, 1] = self.branch_names[self.worst_min]
self.report[0, 2] = self.branch_names[self.worst_contingency_max]
self.report[0, 3] = self.branch_names[self.worst_contingency_min]
self.report[0, 4] = self.atc_max[self.worst_max]
self.report[0, 5] = self.atc_min[self.worst_min]
self.report[0, 6] = self.PS_max
self.report[0, 7] = self.PS_min
def get_results_dict(self):
"""
Returns a dictionary with the results sorted in a dictionary
:return: dictionary of 2D numpy arrays (probably of complex numbers)
"""
data = {'atc_max': self.atc_max.tolist(),
'atc_min': self.atc_min.tolist(),
'PS_max': self.PS_max,
'PS_min': self.PS_min}
return data
def mdl(self, result_type: ResultTypes):
"""
Plot the results
:param result_type:
:return:
"""
index = self.branch_names
if result_type == ResultTypes.NetTransferCapacityPS:
data = np.array([self.PS_min, self.PS_max])
y_label = '(MW)'
title, _ = result_type.value
labels = ['Power shift']
index = ['PS min', 'PS max']
elif result_type == ResultTypes.NetTransferCapacityAlpha:
data = self.alpha
y_label = '(p.u.)'
title, _ = result_type.value
labels = ['Sensitivity to the exchange']
index = self.branch_names
elif result_type == ResultTypes.NetTransferCapacityReport:
data = np.array(self.report)
y_label = ''
title, _ = result_type.value
index = self.report_indices
labels = self.report_headers
else:
raise Exception('Result type not understood:' + str(result_type))
# assemble model
mdl = ResultsModel(data=data,
index=index,
columns=labels,
title=title,
ylabel=y_label)
return mdl
class NetTransferCapacityOptions:
def __init__(self, distributed_slack=True, correct_values=True,
bus_idx_from=list(), bus_idx_to=list(), dT=100.0, threshold=0.02):
"""
:param distributed_slack:
:param correct_values:
:param bus_idx_from:
:param bus_idx_to:
:param dT:
:param threshold:
"""
self.distributed_slack = distributed_slack
self.correct_values = correct_values
self.bus_idx_from = bus_idx_from
self.bus_idx_to = bus_idx_to
self.dT = dT
self.threshold = threshold
class NetTransferCapacityDriver(DriverTemplate):
tpe = SimulationTypes.NetTransferCapacity_run
name = tpe.value
def __init__(self, grid: MultiCircuit, options: NetTransferCapacityOptions):
"""
Power Transfer Distribution Factors class constructor
@param grid: MultiCircuit Object
@param options: OPF options
@:param pf_results: PowerFlowResults, this is to get the Sf
"""
DriverTemplate.__init__(self, grid=grid)
# Options to use
self.options = options
# OPF results
self.results = NetTransferCapacityResults(n_br=0,
n_bus=0,
br_names=[],
bus_names=[],
bus_types=[],
bus_idx_from=[],
bus_idx_to=[])
def run(self):
"""
Run thread
"""
start = time.time()
self.progress_text.emit('Analyzing')
self.progress_signal.emit(0)
# compile the circuit
nc = compile_snapshot_circuit(self.grid)
# get the converted bus indices
# idx1b, idx2b = compute_transfer_indices(idx1=self.options.bus_idx_from,
# idx2=self.options.bus_idx_to,
# bus_types=nc.bus_types)
idx1b = self.options.bus_idx_from
idx2b = self.options.bus_idx_to
# declare the linear analysis
linear = LinearAnalysis(grid=self.grid)
linear.run()
# declare the results
self.results = NetTransferCapacityResults(n_br=linear.numerical_circuit.nbr,
n_bus=linear.numerical_circuit.nbus,
br_names=linear.numerical_circuit.branch_names,
bus_names=linear.numerical_circuit.bus_names,
bus_types=linear.numerical_circuit.bus_types,
bus_idx_from=idx1b,
bus_idx_to=idx2b)
# compute NTC
alpha, atc_max, atc_min, worst_max, worst_min, \
worst_contingency_max, worst_contingency_min, PS_max, PS_min = compute_ntc(ptdf=linear.PTDF,
lodf=linear.LODF,
P0=nc.Sbus.real,
flows=linear.get_flows(nc.Sbus),
rates=nc.ContingencyRates,
idx1=idx1b,
idx2=idx2b)
# post-process and store the results
self.results.alpha = alpha
self.results.atc_max = atc_max
self.results.atc_min = atc_min
        self.results.worst_max = worst_max
        self.results.worst_min = worst_min
self.results.worst_contingency_max = worst_contingency_max
self.results.worst_contingency_min = worst_contingency_min
self.results.PS_max = PS_max
self.results.PS_min = PS_min
self.results.make_report()
end = time.time()
self.elapsed = end - start
self.progress_text.emit('Done!')
self.done_signal.emit()
def get_steps(self):
"""
Get variations list of strings
"""
return list()
if __name__ == '__main__':
from GridCal.Engine import *
fname = r'C:\Users\penversa\Git\GridCal\Grids_and_profiles\grids\IEEE 118 Bus - ntc_areas.gridcal'
main_circuit = FileOpen(fname).open()
options = NetTransferCapacityOptions()
driver = NetTransferCapacityDriver(main_circuit, options)
driver.run()
print()
| gpl-3.0 | 2,920,079,811,810,432,000 | 37.803526 | 123 | 0.487958 | false | 4.204421 | false | false | false |
arsfeld/conduit | conduit/modules/UNSUPPORTED/PicasaDesktopModule/PicasaDesktopModule.py | 1 | 7689 | import os
import csv
import logging
import xml.dom.minidom
log = logging.getLogger("modules.Picasa")
import conduit
import conduit.utils as Utils
import conduit.Vfs as Vfs
import conduit.Exceptions as Exceptions
import conduit.dataproviders.DataProvider as DataProvider
import conduit.datatypes.Photo as Photo
from gettext import gettext as _
FILENAME_IDX = 0
DISPLAYNAME_IDX = 1
PHOTOS_IDX = 2
PICASA_DIR = os.path.join(os.path.expanduser("~"),".picasa")
if os.path.exists(PICASA_DIR):
MODULES = {
"PicasaDesktopSource" : { "type": "dataprovider" }
}
log.info("Picasa desktop directory detected")
else:
MODULES = {}
log.info("Picasa desktop not installed")
class PicasaDesktopSource(DataProvider.DataSource):
_name_ = _("Picasa Desktop")
_description_ = _("Synchronize Picasa from Picasa Desktop")
_category_ = conduit.dataproviders.CATEGORY_PHOTOS
_module_type_ = "source"
_in_type_ = "file/photo"
_out_type_ = "file/photo"
_icon_ = "picasa"
_configurable_ = True
def __init__(self, *args):
DataProvider.DataSource.__init__(self)
self.albums = []
self.enabledAlbums = []
def _fix_picasa_image_filename(self, filename):
#Picasa stores the image filename in some weird relative format
#with $ = $HOME and $My Pictures = xdg pictures dir
parts = filename.split("\\")
if parts[0] == "$My Pictures":
#FIXME: Use xdg user dirs to get localised photo dir
parts[0] = os.path.join(os.environ['HOME'],'Pictures')
elif parts[0][0] == "$":
#Take care of other photos in ~ by replacing $ with $HOME
parts[0] = os.path.join(os.environ['HOME'],parts[0][1:])
elif parts[0] == "[z]":
#absolute paths
parts[0] = "/"
else:
log.warn("Could not convert picasa photo path to unix path")
return None
path = os.path.abspath(os.sep.join(parts))
return path
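    # Illustrative conversions (example paths, assuming $HOME is /home/user):
    #   "$My Pictures\\Holiday\\img.jpg" -> /home/user/Pictures/Holiday/img.jpg
    #   "$Desktop\\img.jpg"              -> /home/user/Desktop/img.jpg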
def _get_all_albums(self):
#only work if picasa has been configured to use a CSV DB
#http://www.zmarties.com/picasa/
dbfile = os.path.join(PICASA_DIR,'drive_c','Program Files','Picasa2','db','dirscanner.csv')
if not os.path.exists(dbfile):
raise Exceptions.RefreshError("Picasa Not Configured to use CSV Database")
pals = []
#Open the CSV file and find all entries with Type = 19 (albums)
f = open(dbfile, 'rt')
try:
reader = csv.DictReader(f)
for row in reader:
if row['Type'] == '19':
                    #wine picasa stores all pal files (each of which describes an album)
#in the following base dir
parts = [PICASA_DIR,
'drive_c',
'Documents and Settings',
os.getlogin(),
'Local Settings']
#and then as given in the csv file
#but first change the windows path to a linux one
parts += row['Name'].split("\\")
path = os.path.abspath(os.sep.join(parts))
pals.append(path)
finally:
f.close()
#parse each pal file to get album info
albums = []
for pal in pals:
log.debug("Parsing album file %s" % pal)
doc = xml.dom.minidom.parse(pal)
#album name
for prop in doc.getElementsByTagName('property'):
if prop.hasAttribute("name") and prop.getAttribute("name") == "name":
name = prop.getAttribute("value")
#image filenames
photos = []
for f in doc.getElementsByTagName('filename'):
filename = self._fix_picasa_image_filename(f.firstChild.data)
if filename != None:
photos.append(filename)
albums.append((
pal, #FILENAME_IDX
name, #DISPLAYNAME_IDX
photos)) #PHOTOS_IDX
return albums
def initialize(self):
return True
def refresh(self):
DataProvider.DataSource.refresh(self)
self.albums = []
try:
self.albums = self._get_all_albums()
except:
#re-raise the refresh error
raise
print self.albums
def get_all(self):
DataProvider.DataSource.get_all(self)
photos = []
for album in self.albums:
if album[FILENAME_IDX] in self.enabledAlbums:
for photouri in album[PHOTOS_IDX]:
if Vfs.uri_exists(photouri):
photos.append(photouri)
return photos
def get(self, LUID):
DataProvider.DataSource.get(self, LUID)
f = Photo.Photo(URI=LUID)
f.set_UID(LUID)
f.set_open_URI(LUID)
return f
def finish(self, aborted, error, conflict):
DataProvider.DataSource.finish(self)
self.albums = []
def configure(self, window):
import gobject
import gtk
def col1_toggled_cb(cell, path, model ):
            #invert because we get this callback before the toggle state changes
checked = not cell.get_active()
model[path][2] = checked
val = model[path][FILENAME_IDX]
if checked and val not in self.enabledAlbums:
self.enabledAlbums.append(val)
elif not checked and val in self.enabledAlbums:
self.enabledAlbums.remove(val)
tree = Utils.dataprovider_glade_get_widget(
__file__,
"config.glade",
"PicasaDesktopConfigDialog"
)
tagtreeview = tree.get_widget("albumtreeview")
#Build a list of all the tags
list_store = gtk.ListStore( gobject.TYPE_STRING, #FILENAME_IDX
                                    gobject.TYPE_STRING, #DISPLAYNAME_IDX
gobject.TYPE_BOOLEAN, #active
)
#Fill the list store
for t in self._get_all_albums():
list_store.append((
t[FILENAME_IDX],
t[DISPLAYNAME_IDX],
t[FILENAME_IDX] in self.enabledAlbums)
)
#Set up the treeview
tagtreeview.set_model(list_store)
#column 1 is the album name
tagtreeview.append_column( gtk.TreeViewColumn(_("Album Name"),
gtk.CellRendererText(),
text=DISPLAYNAME_IDX)
)
#column 2 is a checkbox for selecting the album to sync
renderer1 = gtk.CellRendererToggle()
renderer1.set_property('activatable', True)
renderer1.connect( 'toggled', col1_toggled_cb, list_store )
tagtreeview.append_column( gtk.TreeViewColumn(_("Enabled"),
renderer1,
active=2)
)
dlg = tree.get_widget("PicasaDesktopConfigDialog")
response = Utils.run_dialog (dlg, window)
dlg.destroy()
print self.enabledAlbums
def get_configuration(self):
return {"enabledAlbums": self.enabledAlbums}
def get_UID(self):
return Utils.get_user_string()
| gpl-2.0 | -1,755,366,651,194,574,800 | 35.098592 | 99 | 0.528027 | false | 4.213151 | true | false | false |
OutOfOrder/sshproxy | sshproxy/server.py | 1 | 25905 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2005-2007 David Guerizec <[email protected]>
#
# Last modified: 2008 Jan 22, 14:19:03 by david
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os, sys, threading, socket
import paramiko
from paramiko import AuthenticationException
from registry import Registry
import cipher, util, log, proxy
import ipc
from options import OptionParser
from util import chanfmt, convert, SSHProxyError
from config import get_config
from dispatcher import Dispatcher
class IPCClientInterface(ipc.IPCInterface):
def __init__(self, server):
self.server = server
def __call__(self, chan):
# simulate an instanciation
ipc.IPCInterface.__init__(self, chan)
return self
class Server(Registry, paramiko.ServerInterface):
_class_id = "Server"
_singleton = True
def __reginit__(self, client, addr, host_key_file):
self.client = client
self.client_addr = addr
ipc_address = get_config('sshproxy').get('ipc_address',
'sshproxy-control')
handler = IPCClientInterface(self)
try:
self.monitor = ipc.IPCClient(ipc_address, handler=handler)
except:
log.exception("Couldn't create IPC channel to monitor")
raise
self.host_key = paramiko.DSSKey(filename=host_key_file)
#self.ip_addr, self.port = client.getsockname()
self.event = threading.Event()
self.args = []
self._remotes = {}
self.exit_status = -1
def get_ns_tag(self, namespace, tag, default=None):
return self.monitor.call('get_ns_tag', namespace=namespace,
tag=tag,
default=default)
def update_ns(self, name, value):
return self.monitor.call('update_ns', name=name, value=value)
def check_acl(self, acl_name):
return self.monitor.call('check_acl', acl_name)
def authorize(self, user_site, need_login=True):
return self.monitor.call('authorize', user_site=user_site,
need_login=need_login)
def setup_forward_handler(self, check_channel_direct_tcpip_request):
if check_channel_direct_tcpip_request:
self.check_channel_direct_tcpip_request = \
check_channel_direct_tcpip_request
def check_direct_tcpip_acl(self, chanid, origin, destination):
o_ip, o_port = origin
d_ip, d_port = destination
self.update_ns('proxy', {
'forward_ip': origin[0],
'forward_port': origin[1]
})
if not (self.check_acl('local_forwarding')):
log.debug("Local Port Forwarding not allowed by ACLs")
self.chan_send("Local Port Forwarding not allowed by ACLs\n")
return False
log.debug("Local Port Forwarding allowed by ACLs")
return True
def check_channel_x11_request(self, channel, single_connection,
x11_auth_proto, x11_auth_cookie, x11_screen_number):
class X11Channel(object):
pass
x11 = X11Channel()
x11.single_connection = single_connection
x11.x11_auth_proto = x11_auth_proto
x11.x11_auth_cookie = x11_auth_cookie
x11.x11_screen_number = x11_screen_number
self.x11 = x11
return True
def check_x11_acl(self):
if not hasattr(self, 'x11'):
log.debug("X11Forwarding not requested by the client")
return False
if not (self.check_acl('x11_forwarding')):
log.debug("X11Forwarding not allowed by ACLs")
return False
log.debug("X11Forwarding allowed by ACLs")
return True
def check_remote_port_forwarding(self):
if (hasattr(self, 'tcpip_forward_ip') and
hasattr(self, 'tcpip_forward_port')):
self.update_ns('proxy', {
'forward_ip': self.tcpip_forward_ip,
'forward_port': self.tcpip_forward_port
})
if not (self.check_acl('remote_forwarding')):
log.debug("Remote Port Forwarding not allowed by ACLs")
self.chan_send("Remote Port Forwarding not allowed by ACLs\n")
return False
log.debug("Remote Port Forwarding allowed by ACLs")
return True
return False
### STANDARD PARAMIKO SERVER INTERFACE
def check_unhandled_channel_request(self, channel, kind, want_reply, m):
log.debug("check_unhandled_channel_request %s", kind)
if kind == "[email protected]":
return True
return False
def check_global_request(self, kind, m):
log.devdebug("check_global_request %s", kind)
return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_port_forward_request(self, address, port):
log.devdebug("check_port_forward_request %s %s", address, port)
self.tcpip_forward_ip = address
self.tcpip_forward_port = port
log.debug('tcpip-forward %s:%s' % (self.tcpip_forward_ip,
self.tcpip_forward_port))
return str(self.tcpip_forward_port)
def check_channel_request(self, kind, chanid):
log.devdebug("check_channel_request %s %s", kind, chanid)
if kind == 'session':
return paramiko.OPEN_SUCCEEDED
return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_auth_password(self, username, password):
if self.valid_auth(username=username, password=password):
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def check_auth_publickey(self, username, key):
if self.valid_auth(username=username, pubkey=key.get_base64()):
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def get_allowed_auths(self, username):
return 'password,publickey'
def check_channel_shell_request(self, channel):
log.devdebug("check_channel_shell_request")
self.event.set()
return True
def check_channel_subsystem_request(self, channel, name):
log.devdebug("check_channel_subsystem_request %s %s", channel, name)
return paramiko.ServerInterface.check_channel_subsystem_request(self,
channel, name)
def check_channel_exec_request(self, channel, command):
log.devdebug('check_channel_exec_request %s %s', channel, command)
self.set_channel(channel)
value = self.set_exec_args(command)
self.event.set()
return value
def check_channel_pty_request(self, channel, term, width, height,
pixelwidth, pixelheight, modes):
self.set_term(term, width, height)
return True
def window_change_handler(self):
return False
def setup_window_change_handler(self, window_change_handler):
self.window_change_handler = window_change_handler
def check_channel_window_change_request(self, channel, width, height,
pixelwidth, pixelheight):
log.devdebug('window_change: %s %s' % (width, height))
self.set_term(self.term, width, height)
return self.window_change_handler()
### SSHPROXY SERVER INTERFACE
def valid_auth(self, username, password=None, pubkey=None):
if not self.monitor.call('authenticate',
username=username,
password=password,
pubkey=pubkey,
ip_addr=self.client_addr[0]):
self._unauth_pubkey = pubkey
return False
self.username = username
self.monitor.call('update_ns', 'client', {'username': username})
if hasattr(self, '_unauth_pubkey') and self._unauth_pubkey:
if self.monitor.call('add_client_pubkey', self._unauth_pubkey):
self.message_client("WARNING: Your public key"
" has been added to the keyring\n")
return True
#we can put here some logging for connection failures
def report_failure(self, reason, *args, **kwargs):
log.error("Failure: %s %s" % (reason, args[0]))
def message_client(self, msg):
self.queue_message(msg)
def queue_message(self, msg=None):
chan = getattr(self, 'chan', None)
if not hasattr(self, 'qmsg'):
self.qmsg = []
if msg is not None:
self.qmsg.append(msg)
if not chan:
return
while len(self.qmsg):
chan.send(chanfmt(self.qmsg.pop(0)))
def set_username(self, username):
self.username = username
def set_channel(self, chan):
self.chan = chan
def set_term(self, term, width, height):
self.term, self.width, self.height = term, width, height
def set_exec_args(self, argstr):
# XXX: naive arguments splitting
self.args = argstr.strip().split()
return True
def is_admin(self):
return self.is_authenticated() and self.monitor.call('is_admin')
def is_authenticated(self):
return hasattr(self, 'username')
def add_cmdline_options(self, parser):
if self.check_acl('admin'):
parser.add_option("", "--admin", dest="action",
help=_(u"run administrative commands"),
action="store_const",
const='admin',
)
if self.check_acl('console_session'):
parser.add_option("", "--console", dest="action",
help=_(u"open administration console"),
action="store_const",
const='console',
)
if self.check_acl('opt_list_sites'):
parser.add_option("-l", "--list-sites", dest="action",
help=_(u"list allowed sites"),
action="store_const",
const='list_sites',
)
if self.check_acl('opt_get_pubkey') or self.check_acl('opt_get_pkey'):
parser.add_option("", "--get-pubkey", dest="action",
help=_(u"display public key for user@host."),
action="store_const",
const="get_pubkey",
)
def parse_cmdline(self, args):
usage = u"""
pssh [options]
pssh [user@site [cmd]]
"""
parser = OptionParser(self.chan, usage=usage)
# add options from a mapping or a Registry callback
self.add_cmdline_options(parser)
return parser.parse_args(args)
def opt_admin(self, options, *args):
if not len(args):
self.chan.send(chanfmt(_(u'Missing argument, try --admin help '
'to get a list of commands.\n')))
return
resp = self.dispatcher.console('%s' % ' '.join(args)) or ''
self.chan.send(chanfmt(resp+'\n'))
def opt_console(self, options, *args):
return self.do_console()
def opt_list_sites(self, options, *args):
self.chan_send(self.run_cmd('list_sites %s'% ' '.join(args)))
def chan_send(self, s):
chan = self.chan
s = chanfmt(s)
sz = len(s)
while sz:
sent = chan.send(s)
if sent:
s = s[sent:]
sz = sz - sent
def run_cmd(self, cmd):
result = self.dispatcher.dispatch(cmd) or ''
return result + '\n'
def readlines(self):
buffer = []
chan = self.chan
chan.setblocking(True)
while True:
data = chan.recv(4096)
if not data:
chan.shutdown_read()
yield ''.join(buffer)
break
if '\n' in data:
yield ''.join(buffer) + data[:data.index('\n')+1]
buffer = [ data[data.index('\n')+1:] ]
else:
buffer.append(data)
def console_no_pty(self):
from server import Server
chan = self.chan
for data in self.readlines():
if not data:
continue
response = self.run_cmd(data)
self.chan_send(response)
def opt_get_pubkey(self, options, *args):
result = []
for site in args:
spubkey = util.get_site_pubkey(site)
if spubkey is None:
result.append(_(u"%s: No privkey tag found") % site)
continue
if len(spubkey):
result.append('%s: %s' % (site, ' '.join(spubkey)))
else:
result.append(_(u"%s: No privkey found") % site)
if not result:
result.append(_(u'Please give at least a site.'))
self.chan.send(chanfmt('\n'.join(result)+'\n'))
def do_eval_options(self, options, args):
if options.action and hasattr(self, 'opt_%s' % options.action):
getattr(self, 'opt_%s' % options.action)(options, *args)
def init_subsystems(self):
#self.transport.set_subsystem_handler('sftp', paramiko.SFTPServer,
# ProxySFTPServer)
pass
def start(self):
# start transport for the client
self.transport = paramiko.Transport(self.client)
self.transport.set_log_channel("paramiko")
# debug !!
#self.transport.set_hexdump(1)
try:
self.transport.load_server_moduli()
except:
raise
self.transport.add_server_key(self.host_key)
# start the server interface
negotiation_ev = threading.Event()
self.init_subsystems()
self.transport.start_server(negotiation_ev, self)
while not negotiation_ev.isSet():
negotiation_ev.wait(0.5)
if not self.transport.is_active():
            raise SSHProxyError('SSH negotiation failed')
chan = self.transport.accept(60)
if chan is None:
log.error('cannot open the channel. '
'Check the transport object. Exiting..')
return
log.info('Authenticated %s', self.username)
self.event.wait(15)
if not self.event.isSet():
log.error('client never asked for a shell or a command.'
' Exiting.')
sys.exit(1)
self.set_channel(chan)
namespace = self.monitor.call('get_namespace')
self.dispatcher = Dispatcher(self.monitor, namespace)
try:
try:
# this is the entry point after initialization have been done
self.do_work()
# after this point, client is disconnected
except SSHProxyError, msg:
log.exception(msg)
chan.send(chanfmt(str(msg)+'\n'))
except Exception, msg:
log.exception("An error occured: %s" % msg)
chan.send(chanfmt(_(u"An error occured: %s\n") % msg))
finally:
if self.chan.active:
self.chan.send_exit_status(self.exit_status)
# close what we can
for item in ('chan', 'transport', 'ipc'):
try:
getattr(self, item).close()
except:
pass
return
def do_console(self):
if not self.check_acl('console_session'):
self.chan.send(chanfmt(_(u"ERROR: You are not allowed to"
" open a console session.\n")))
return False
self.monitor.call('update_ns', 'client', {'type': 'console'})
if hasattr(self, 'term'):
return self.dispatcher.console()
else:
return self.console_no_pty()
def do_scp(self):
args = []
argv = self.args[1:]
while True:
if argv[0][0] == '-':
args.append(argv.pop(0))
continue
break
site, path = argv[0].split(':', 1)
if not self.authorize(site, need_login=True):
self.chan.send(chanfmt(_(u"ERROR: %s does not exist "
"in your scope\n") % site))
return False
if '-t' in args:
upload = True
scpdir = 'upload'
else:
upload = False
scpdir = 'download'
self.update_ns('proxy', {
'scp_dir': scpdir,
'scp_path': path or '.',
'scp_args': ' '.join(args)
})
# check ACL for the given direction, then if failed, check general ACL
if not ((self.check_acl('scp_' + scpdir)) or
self.check_acl('scp_transfer')):
self.chan.send(chanfmt(_(u"ERROR: You are not allowed to"
" do scp file transfert in this"
" directory or direction on %s\n") % site))
return False
self.update_ns('client', {
'type': 'scp_%s' % scpdir,
})
conn = proxy.ProxyScp(self.chan, self.connect_site(), self.monitor)
try:
self.exit_status = conn.loop()
except AuthenticationException, msg:
self.chan.send("\r\n ERROR: %s." % msg +
"\r\n Please report this error "
"to your administrator.\r\n\r\n")
self.report_failure("site_authentication_error", msg)
return False
return True
def do_remote_execution(self):
site = self.args.pop(0)
if not self.authorize(site, need_login=True):
self.chan.send(chanfmt(_(u"ERROR: %s does not exist in "
"your scope\n") % site))
return False
self.update_ns('proxy', {'cmdline': (' '.join(self.args)).strip()})
if not self.check_acl('remote_exec'):
self.chan.send(chanfmt(_(u"ERROR: You are not allowed to"
" exec that command on %s"
"\n") % site))
return False
self.update_ns('client', {
'type': 'remote_exec',
})
conn = proxy.ProxyCmd(self.chan, self.connect_site(), self.monitor)
try:
self.exit_status = conn.loop()
except AuthenticationException, msg:
self.chan.send(_(u"\r\n ERROR: %s.") % msg +
_(u"\r\n Please report this error "
"to your administrator.\r\n\r\n"))
self.report_failure("site_authentication_error", msg)
return False
conn = None
log.info("Exiting %s", site)
return True
def do_shell_session(self):
site = self.args.pop(0)
if not self.authorize(site, need_login=True):
self.chan.send(chanfmt(_(u"ERROR: %s does not exist in "
"your scope\n") % site))
return False
if not self.check_acl('shell_session'):
self.chan.send(chanfmt(_(u"ERROR: You are not allowed to"
" open a shell session on %s"
"\n") % site))
return False
self.update_ns('client', {
'type': 'shell_session'
})
log.info("Connecting to %s", site)
conn = proxy.ProxyShell(self.chan, self.connect_site(), self.monitor)
try:
self.exit_status = conn.loop()
except AuthenticationException, msg:
self.chan.send(_(u"\r\n ERROR: %s.") % msg +
_(u"\r\n Please report this error "
"to your administrator.\r\n\r\n"))
self.report_failure("site_authentication_error", msg)
return False
except KeyboardInterrupt:
return True
except Exception, e:
self.chan.send(_(u"\r\n ERROR: It seems you found a bug."
"\r\n Please report this error "
"to your administrator.\r\n"
"Exception class: <%s>\r\n\r\n")
% e.__class__.__name__)
self.report_failure("bug", str(e))
raise
# if the direct connection closed, then exit cleanly
conn = None
log.info("Exiting %s", site)
return True
# XXX: stage2: make it easier to extend
# make explicit the stage automaton
def do_work(self):
# empty the message queue now we've got a valid channel
self.queue_message()
# this is a connection to the proxy console
if not len(self.args):
return self.do_console()
else:
# this is an option list
if len(self.args[0]) and self.args[0][0] == '-':
try:
options, args = self.parse_cmdline(self.args)
except 'EXIT':
return False
return self.do_eval_options(options, args)
# this is an scp file transfer
elif self.args[0] == 'scp':
return self.do_scp()
else:
site = self.args[0]
# this is a remote command execution
if len(self.args) > 1:
return self.do_remote_execution()
# this is a shell session
else:
return self.do_shell_session()
# Should never get there
return False
def connect_site(self, site_tags=None, site_ref=None):
tags = self.monitor.call('get_namespace')
if site_tags:
tags['site'] = site_tags
name = '%s@%s' % (tags['site']['login'],
tags['site']['name'])
hostkey = tags['proxy'].get('hostkey', None) or None
if site_ref is None:
if not tags['site'].get('ip_address'):
raise ValueError('Missing site address in database')
site_ref = (tags['site']['ip_address'],
int(tags['site'].get('port', 22)))
import socket
try:
transport = paramiko.Transport(site_ref)
except socket.error, msg:
raise SSHProxyError("Could not connect to site %s: %s"
% (name, msg[1]))
except Exception, msg:
raise SSHProxyError("Could not connect to site %s: %s"
% (name, str(msg)))
transport.start_client()
if hostkey is not None:
transport._preferred_keys = [ hostkey.get_name() ]
key = transport.get_remote_server_key()
if (key.get_name() != hostkey.get_name()
or str(key) != str(hostkey)):
log.error('Bad host key from server (%s).' % name)
                raise AuthenticationException('Bad host key from server (%s).'
% name)
log.info('Server host key verified (%s) for %s' % (key.get_name(),
name))
privkey = cipher.decipher(tags['site'].get('privkey',
tags['site'].get('pkey', '')))
password = cipher.decipher(tags['site'].get('password', ''))
password_encoding = tags['site'].get('password_encoding', 'utf8')
password = convert(password, password_encoding)
authentified = False
if privkey:
privkey = util.get_dss_key_from_string(privkey)
try:
transport.auth_publickey(tags['site']['login'], privkey)
authentified = True
except AuthenticationException:
log.warning('PKey for %s was not accepted' % name)
if not authentified and password:
try:
transport.auth_password(tags['site']['login'], password)
authentified = True
except AuthenticationException:
log.error('Password for %s is not valid' % name)
raise
if not authentified:
raise AuthenticationException('No valid authentication token for %s'
% name)
chan = transport.open_session()
chan.settimeout(1.0)
return chan
Server.register()
| gpl-2.0 | -4,614,761,050,825,762,000 | 35.029207 | 80 | 0.525922 | false | 4.290328 | false | false | false |
Plurk/Solace | solace/utils/admin.py | 2 | 1492 | # -*- coding: utf-8 -*-
"""
solace.utils.admin
~~~~~~~~~~~~~~~~~~
Admin helpers.
:copyright: (c) 2009 by Plurk Inc., see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from solace import settings
from solace.i18n import _
from solace.application import url_for
from solace.templating import render_template
from solace.utils.mail import send_email
from solace.models import User, session
def ban_user(user):
"""Bans a user if it was not already banned. This also sends the
user an email that he was banned.
"""
if user.is_banned:
return
user.is_banned = True
send_email(_(u'User account banned'),
render_template('mails/user_banned.txt', user=user),
user.email)
session.commit()
def unban_user(user):
"""Unbans the user. What this actually does is sending the user
an email with a link to reactivate his account. For reactivation
he has to give himself a new password.
"""
if not user.is_banned:
return
if settings.REQUIRE_NEW_PASSWORD_ON_UNBAN:
user.is_active = False
user.is_banned = False
reset_url = url_for('core.reset_password', email=user.email,
key=user.password_reset_key, _external=True)
send_email(_(u'Your ban was lifted'),
render_template('mails/user_unbanned.txt', user=user,
reset_url=reset_url), user.email)
session.commit()
| bsd-3-clause | 5,443,603,702,995,309,000 | 29.44898 | 69 | 0.632038 | false | 3.647922 | false | false | false |
mcyprian/pyp2rpm | pyp2rpm/command/extract_dist.py | 1 | 3373 | import sys
import json
from distutils.core import Command
class extract_dist(Command):
"""Custom distutils command to extract metadata form setup function."""
description = ("Assigns self.distribution to class attribute to make "
"it accessible from outside a class.")
user_options = [('stdout', None,
'print metadata in json format to stdout')]
class_metadata = None
def __init__(self, *args, **kwargs):
"""Metadata dictionary is created, all the metadata attributes,
that were not found are set to default empty values. Checks of data
types are performed.
"""
Command.__init__(self, *args, **kwargs)
self.metadata = {}
for attr in ['setup_requires', 'tests_require', 'install_requires',
'packages', 'py_modules', 'scripts']:
self.metadata[attr] = to_list(getattr(self.distribution, attr, []))
try:
for k, v in getattr(
self.distribution, 'extras_require', {}).items():
                if k in ['test', 'docs', 'doc', 'dev']:
attr = 'setup_requires'
else:
attr = 'install_requires'
self.metadata[attr] += to_list(v)
except (AttributeError, ValueError):
# extras require are skipped in case of wrong data format
# can't log here, because this file is executed in a subprocess
pass
for attr in ['url', 'long_description', 'description', 'license']:
self.metadata[attr] = to_str(
getattr(self.distribution.metadata, attr, None))
self.metadata['classifiers'] = to_list(
getattr(self.distribution.metadata, 'classifiers', []))
if isinstance(getattr(self.distribution, "entry_points", None), dict):
self.metadata['entry_points'] = self.distribution.entry_points
else:
self.metadata['entry_points'] = None
self.metadata['test_suite'] = getattr(
self.distribution, "test_suite", None) is not None
def initialize_options(self):
"""Sets default value of the stdout option."""
self.stdout = False
def finalize_options(self):
"""Abstract method of Command class have to be overridden."""
pass
def run(self):
"""Sends extracted metadata in json format to stdout if stdout
option is specified, assigns metadata dictionary to class_metadata
variable otherwise.
"""
if self.stdout:
sys.stdout.write("extracted json data:\n" + json.dumps(
self.metadata, default=to_str))
else:
extract_dist.class_metadata = self.metadata
def to_list(var):
"""Checks if given value is a list, tries to convert, if it is not."""
if var is None:
return []
if isinstance(var, str):
var = var.split('\n')
elif not isinstance(var, list):
try:
var = list(var)
except TypeError:
raise ValueError("{} cannot be converted to the list.".format(var))
return var
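# Illustrative behaviour of to_list (assumed inputs):
#   to_list(None)       -> []
#   to_list("a\nb")     -> ['a', 'b']
#   to_list(('x', 'y')) -> ['x', 'y']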
def to_str(var):
"""Similar to to_list function, but for string attributes."""
try:
return str(var)
except TypeError:
raise ValueError("{} cannot be converted to string.".format(var))
| mit | 3,313,173,511,970,343,400 | 34.882979 | 79 | 0.584939 | false | 4.595368 | false | false | false |
piskvorky/pattern | pattern/text/en/parser/brill.py | 1 | 9514 | #### PATTERN | EN | PARSER | BRILL LEXICON #########################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# Brill lexicon with lexical and contextual rules, using lazy-loading.
import os
try:
MODULE = os.path.dirname(__file__)
except:
MODULE = ""
#### BRILL LEXICAL RULES ###########################################################################
LEXICAL = ["char", "hassuf", "deletesuf", "addsuf", "haspref", "deletepref", "addpref"]
LEXICAL += ["goodleft", "goodright"]
LEXICAL.extend(["f"+x for x in LEXICAL])
LEXICAL = dict.fromkeys(LEXICAL, True)
class LexicalRules(list):
def __init__(self, lexicon, path=os.path.join(MODULE, "Brill_lexical_rules.txt")):
# Brill's lexical rules.
# An entry looks like: ('fhassuf', ['NN', 's', 'fhassuf', '1', 'NNS', 'x']).
# The first item is the lookup command.
# If prefixed with an "f", it means that the token needs to have the first given tag (NN).
# In this case, if the NN-word ends with an "s", it is tagged as NNS.
self.lexicon = lexicon
self.path = path
def load(self):
for i, rule in enumerate(open(self.path).read().strip().split("\n")):
rule = rule.split()
for cmd in rule:
if cmd in LEXICAL:
list.append(self, (cmd, rule)); break
def __iter__(self):
if len(self) == 0:
self.load()
return list.__iter__(self)
def apply(self, token, previous=(None,None), next=(None,None)):
""" Applies the lexical rules to the given token.
A token is a [word,tag]-item whose tag might change if it matches a rule.
Rules are lexically based on word characters, prefixes and suffixes.
"""
word, pos = token[0], token[1]
if word[:1].isdigit() and word.replace(".","").isdigit():
return [word, "CD"]
for cmd, rule in iter(self):
pos = rule[-2]
x = rule[0]
if cmd.startswith("f"):
# Word must be tagged as the f-rule states.
cmd = cmd[1:]
if token[1] != rule[0]: continue
x = rule[1]
if (cmd == "char" and x in word) \
or (cmd == "hassuf" and word.endswith(x)) \
or (cmd == "deletesuf" and word.endswith(x) and word[:-len(x)] in self.lexicon) \
or (cmd == "haspref" and word.startswith(x)) \
or (cmd == "deletepref" and word.startswith(x) and word[len(x):] in self.lexicon) \
or (cmd == "addsuf" and word+x in self.lexicon) \
or (cmd == "addpref" and x+word in self.lexicon) \
or (cmd == "goodleft" and x == previous[0]) \
or (cmd == "goodright" and x == next[0]):
return [word, pos]
return token
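    # Illustrative example (assuming the rules file has been loaded): with the
    # entry ('fhassuf', ['NN', 's', 'fhassuf', '1', 'NNS', 'x']) described
    # above, the token ['walks', 'NN'] is retagged to ['walks', 'NNS'] because
    # it is tagged NN and ends in "s".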
#### BRILL CONTEXTUAL RULES ########################################################################
CONTEXTUAL = ["PREVTAG", "NEXTTAG", "PREV1OR2TAG", "NEXT1OR2TAG", "PREV1OR2OR3TAG", "NEXT1OR2OR3TAG"]
CONTEXTUAL += ["SURROUNDTAG", "PREVBIGRAM", "NEXTBIGRAM", "LBIGRAM", "RBIGRAM", "PREV2TAG", "NEXT2TAG"]
CONTEXTUAL += ["CURWD", "PREVWD", "NEXTWD", "PREV1OR2WD", "NEXT1OR2WD", "WDPREVTAG"]
CONTEXTUAL = dict.fromkeys(CONTEXTUAL, True)
class ContextualRules(list):
def __init__(self, lexicon, path=os.path.join(MODULE, "Brill_contextual_rules.txt")):
# Brill's contextual rules.
# An entry looks like: ('PREVTAG', ['VBD', 'VB', 'PREVTAG', 'TO']).
# The first item is the lookup command.
# The example rule reads like:
# "If the previous word is tagged TO, change this word's tag from VBD to VB (if it is VBD)".
self.lexicon = lexicon
self.path = path
def load(self):
for i, rule in enumerate(open(self.path).read().strip().split("\n")):
rule = rule.split()
for cmd in rule:
if cmd in CONTEXTUAL:
list.append(self, (cmd, rule)); break
def __iter__(self):
if len(self) == 0:
self.load()
return list.__iter__(self)
def apply(self, tokens):
""" Applies the contextual rules to the given list of tokens.
Each token is a [word,tag]-item whose tag might change if it matches a rule.
Rules are contextually based on the token's position in the sentence.
"""
b = [(None,"STAART")] * 3 # Add empty tokens so we can scan ahead and behind.
T = b + tokens + b
for i, token in enumerate(T):
for cmd, rule in iter(self):
# If the word is tagged differently than required by the rule, skip it.
if token[1] != rule[0]:
continue
# Never allow rules to tag "be" anything but infinitive.
if token[0] == "be" and token[1] == "VB":
continue
# A rule involves scanning the previous/next word or tag,
# and all combinations thereof.
x = rule[3]
if (cmd == "PREVTAG" and x == T[i-1][1]) \
or (cmd == "NEXTTAG" and x == T[i+1][1]) \
or (cmd == "PREV1OR2TAG" and x in (T[i-1][1], T[i-2][1])) \
or (cmd == "NEXT1OR2TAG" and x in (T[i+1][1], T[i+2][1])) \
or (cmd == "PREV1OR2OR3TAG" and x in (T[i-1][1], T[i-2][1], T[i-3][1])) \
or (cmd == "NEXT1OR2OR3TAG" and x in (T[i+1][1], T[i+2][1], T[i+3][1])) \
or (cmd == "SURROUNDTAG" and x == T[i-1][1] and rule[4] == T[i+1][1]) \
or (cmd == "PREVBIGRAM" and x == T[i-2][1] and rule[4] == T[i-1][1]) \
or (cmd == "NEXTBIGRAM" and x == T[i+1][1] and rule[4] == T[i+2][1]) \
or (cmd == "LBIGRAM" and x == T[i-1][0] and rule[4] == T[i][0]) \
or (cmd == "RBIGRAM" and x == T[i][0] and rule[4] == T[i+1][0]) \
or (cmd == "PREV2TAG" and x == T[i-2][1]) \
or (cmd == "NEXT2TAG" and x == T[i+2][1]) \
or (cmd == "CURWD" and x == T[i][0]) \
or (cmd == "PREVWD" and x == T[i-1][0]) \
or (cmd == "NEXTWD" and x == T[i+1][0]) \
or (cmd == "PREV1OR2WD" and x in (T[i-1][0], T[i-2][0])) \
or (cmd == "NEXT1OR2WD" and x in (T[i+1][0], T[i+2][0])) \
or (cmd == "WDPREVTAG" and x == T[i][0] and rule[4] == T[i-1][1]) \
or (cmd == "WDNEXTTAG" and x == T[i][0] and rule[4] == T[i+1][1]):
tokens[i-len(b)] = [tokens[i-len(b)][0], rule[1]]
# Brill's contextual rules assign tags based on a statistical majority vote.
# Corrections, primarily based on user-feedback.
# with/IN
if token[0] == "with":
tokens[i-len(b)][1] = "IN"
# such/JJ as/IN
if i > 0 and T[i-1][0] == "such" and token[0] == "as":
tokens[i-1-len(b)][1] = "JJ"
tokens[i-0-len(b)][1] = "IN"
# a/DT burning/VBG candle/NN => a/DT burning/JJ candle/NN
if token[1] == "VBG":
if T[i-1][1] == "DT" and T[i+1][1].startswith("NN"):
tokens[i-len(b)][1] = "JJ"
# een/DT brandende/VBG kaars/NN => een/DT brandende/JJ kaars/NN
if token[1].startswith("V(") and "teg_dw" in token[1]:
if T[i-1][1].startswith("Art(") and T[i+1][1].startswith("N("):
tokens[i-len(b)][1] = "JJ"
return tokens
#### BRILL LEXICON #################################################################################
class Lexicon(dict):
def __init__(self, path=os.path.join(MODULE, "Brill_lexicon.txt")):
self.path = path
self.lexical_rules = LexicalRules(self)
self.contextual_rules = ContextualRules(self)
def load(self):
# Brill's lexicon is a list of common tokens and their part-of-speech tag.
# It takes a while to load but this happens only once when pattern.en.parser.parse() is called.
# Create a dictionary from the entries:
dict.__init__(self, (x.split(" ")[:2] for x in open(self.path).read().splitlines()))
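    # Illustrative lazy-loading behaviour (a sketch, not part of the API):
    #   lexicon = Lexicon()          # nothing is read from disk yet
    #   "the" in lexicon             # the first lookup triggers load()
    #   lexicon.lexical_rules.apply(["walks", "NN"])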
def get(self, word, default=None):
return word in self and dict.__getitem__(self, word) or default
def __contains__(self, word):
if len(self) == 0:
self.load()
return dict.__contains__(self, word)
def __getitem__(self, word):
if len(self) == 0:
self.load()
return dict.__getitem__(self, word)
def __setitem__(self, word, pos):
if len(self) == 0:
self.load()
return dict.__setitem__(self, word, pos)
def keys(self):
if len(self) == 0:
self.load()
return dict.keys(self)
def values(self):
if len(self) == 0:
self.load()
return dict.values(self)
def items(self):
if len(self) == 0:
self.load()
return dict.items(self) | bsd-3-clause | 5,150,956,330,610,113,000 | 44.745192 | 103 | 0.487387 | false | 3.395432 | false | false | false |
sunfishcode/cretonne | lib/cretonne/meta/semantics/__init__.py | 1 | 3019 | """Definitions for the semantics segment of the Cretonne language."""
from cdsl.ti import TypeEnv, ti_rtl, get_type_env
from cdsl.operands import ImmediateKind
from cdsl.ast import Var
try:
from typing import List, Dict, Tuple # noqa
from cdsl.ast import VarAtomMap # noqa
from cdsl.xform import XForm, Rtl # noqa
from cdsl.ti import VarTyping # noqa
from cdsl.instructions import Instruction, InstructionSemantics # noqa
except ImportError:
pass
def verify_semantics(inst, src, xforms):
# type: (Instruction, Rtl, InstructionSemantics) -> None
"""
Verify that the semantics transforms in xforms correctly describe the
instruction described by the src Rtl. This involves checking that:
0) src is a single instance of inst
1) For all x\in xforms x.src is a single instance of inst
2) For any concrete values V of Literals in inst:
For all concrete typing T of inst:
            There exists a single x \in xforms that applies to src concretized
            to V and T
"""
# 0) The source rtl is always a single instance of inst
assert len(src.rtl) == 1 and src.rtl[0].expr.inst == inst
# 1) For all XForms x, x.src is a single instance of inst
for x in xforms:
assert len(x.src.rtl) == 1 and x.src.rtl[0].expr.inst == inst
variants = [src] # type: List[Rtl]
# 2) For all enumerated immediates, compute all the possible
# versions of src with the concrete value filled in.
for i in inst.imm_opnums:
op = inst.ins[i]
if not (isinstance(op.kind, ImmediateKind) and
op.kind.is_enumerable()):
continue
new_variants = [] # type: List[Rtl]
for rtl_var in variants:
s = {v: v for v in rtl_var.vars()} # type: VarAtomMap
arg = rtl_var.rtl[0].expr.args[i]
assert isinstance(arg, Var)
for val in op.kind.possible_values():
s[arg] = val
new_variants.append(rtl_var.copy(s))
variants = new_variants
# For any possible version of the src with concrete enumerated immediates
for src in variants:
# 2) Any possible typing should be covered by exactly ONE semantic
# XForm
src = src.copy({})
typenv = get_type_env(ti_rtl(src, TypeEnv()))
typenv.normalize()
typenv = typenv.extract()
for t in typenv.concrete_typings():
matching_xforms = [] # type: List[XForm]
for x in xforms:
if src.substitution(x.src, {}) is None:
continue
# Translate t using x.symtab
t = {x.symtab[str(v)]: tv for (v, tv) in t.items()}
if (x.ti.permits(t)):
matching_xforms.append(x)
assert len(matching_xforms) == 1,\
("Possible typing {} of {} not matched by exactly one case " +
": {}").format(t, src.rtl[0], matching_xforms)
| apache-2.0 | -6,198,810,019,702,982,000 | 38.207792 | 79 | 0.595893 | false | 3.704294 | false | false | false |
emsrc/daeso-framework | lib/daeso/pair.py | 1 | 1875 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2013 by Erwin Marsi and TST-Centrale
#
# This file is part of the DAESO Framework.
#
# The DAESO Framework is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# The DAESO Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
generic Pair class for a pair of source and target items (strings, nodes,
graphs, ...)
"""
__authors__ = "Erwin Marsi <[email protected]>"
# used namedtuple before, but immutable attributes turned out to be
# inconvenient on a number of occasions
class Pair(object):
"""
Pair of source and target objects
"""
def __init__(self, source=None, target=None):
self.set(source, target)
def __eq__(self, other):
        if isinstance(other, Pair):
            return ( self.source == other.source and
                     self.target == other.target )
        return NotImplemented
def __repr__(self):
return 'Pair(source={pair.source!r}, target={pair.target!r})'.format(
pair=self)
def __str__(self):
return 'Pair(source={pair.source}, target={pair.target})'.format(
pair=self)
def __iter__(self):
return (role for role in (self.source, self.target))
def set(self, source=None, target=None):
        self.source = source
self.target = target
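# Minimal usage sketch (illustrative values only):
#   pair = Pair("la maison", "the house")
#   source, target = pair                      # __iter__ yields source, then target
#   pair == Pair("la maison", "the house")     # -> True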
| gpl-3.0 | -5,680,566,909,070,398,000 | 28.296875 | 77 | 0.6368 | false | 3.90625 | false | false | false |
emulienfou/docker-stack | setup.py | 1 | 1666 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(*parts):
path = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(path, encoding='utf-8') as fobj:
return fobj.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
install_requires = [
'gitpython >= 2',
'jinja2 >= 2',
'docopt >= 0.6.1, < 0.7',
'pyyaml >= 3.12',
'validators',
'requests',
'clint',
'PyYAML >= 3.08',
'prettytable'
]
setup(
name='docker-stack',
version=find_version("dockerstack", "__init__.py"),
description='This tool is used to generate easily and dynamically config files for docker.',
author='DSanchez',
author_email='[email protected]',
url='',
license='Apache License 2.0',
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
entry_points="""
[console_scripts]
docker-stack=dockerstack.main:main
""",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
)
| mit | 1,364,647,457,032,078,000 | 25.444444 | 96 | 0.620648 | false | 3.661538 | false | false | false |
dstufft/warehouse | warehouse/migrations/versions/0864352e2168_drop_duplicate_indexes.py | 1 | 1869 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Drop duplicate indexes
Revision ID: 0864352e2168
Revises: 6a6eb0a95603
Create Date: 2018-08-15 20:27:08.429077
"""
from alembic import op
revision = "0864352e2168"
down_revision = "6a6eb0a95603"
def upgrade():
# This is an exact duplicate of the accounts_email_email_key index, minus the unique
# constraint.
op.drop_index("accounts_email_email_like", table_name="accounts_email")
# This is an exact duplicate of the journals_pkey index, minus the primary key
# constraint.
op.drop_index("journals_id_idx", table_name="journals")
# This is an exact duplicate of the trove_classifiers_classifier_key index, minus
# the unique constraint.
op.drop_index("trove_class_class_idx", table_name="trove_classifiers")
# This is an exact duplicate of the trove_classifiers_pkey index, minus the primary
# key constraint.
op.drop_index("trove_class_id_idx", table_name="trove_classifiers")
def downgrade():
op.create_index("trove_class_id_idx", "trove_classifiers", ["id"], unique=False)
op.create_index(
"trove_class_class_idx", "trove_classifiers", ["classifier"], unique=False
)
op.create_index("journals_id_idx", "journals", ["id"], unique=False)
op.create_index(
"accounts_email_email_like", "accounts_email", ["email"], unique=False
)
| apache-2.0 | 1,130,926,108,418,970,900 | 36.38 | 88 | 0.718566 | false | 3.526415 | false | false | false |
JudTown17/solutions-geoprocessing-toolbox | operational_graphics/toolboxes/scripts/RangeFanByBearingAndTraversal.py | 1 | 8907 |
#------------------------------------------------------------------------------
# Copyright 2014 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# ==================================================
# UpdateRangeFans.py
# --------------------------------------------------
# Built for ArcGIS 10.1
# ==================================================
# IMPORTS ==========================================
import os, sys, math, traceback
import arcpy
from arcpy import env
# ARGUMENTS & LOCALS ===============================
inFeature = arcpy.GetParameterAsText(0)
weaponTable = arcpy.GetParameterAsText(1)
weaponField = arcpy.GetParameterAsText(2)
weaponModel = arcpy.GetParameterAsText(3)
maxRangeField = arcpy.GetParameterAsText(4)
maxRange = float(arcpy.GetParameterAsText(5)) #1000.0 # meters
geoBearing = float(arcpy.GetParameterAsText(6)) #45.0 # degrees
traversal = float(arcpy.GetParameterAsText(7)) #60.0 # degrees
outFeature = arcpy.GetParameterAsText(8)
deleteme = []
debug = True
leftAngle = 0.0 # degrees
rightAngle = 90.0 # degrees
# CONSTANTS ========================================
# FUNCTIONS ========================================
def Geo2Arithmetic(inAngle):
outAngle = -1.0
# force input angle into 0 to 360 range
if (inAngle > 360.0):
inAngle = math.fmod(inAngle,360.0)
# if 360, make it zero
if inAngle == 360.0: inAngle = 0.0
#0 to 90
if (inAngle >= 0.0 and inAngle <= 90.0):
outAngle = math.fabs(inAngle - 90.0)
# 90 to 360
if (inAngle > 90.0 and inAngle < 360.0):
outAngle = 360.0 - (inAngle - 90.0)
if debug == True: arcpy.AddMessage("G2A inAngle(" + str(inAngle) + "), outAngle(" + str(outAngle) + ")")
return outAngle
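# Illustrative conversions (degrees in, degrees out):
#   Geo2Arithmetic(0.0)   -> 90.0   (geographic north -> arithmetic +Y axis)
#   Geo2Arithmetic(90.0)  -> 0.0    (geographic east  -> arithmetic +X axis)
#   Geo2Arithmetic(180.0) -> 270.0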
try:
currentOverwriteOutput = env.overwriteOutput
env.overwriteOutput = True
sr = arcpy.SpatialReference()
sr.factoryCode = 4326
sr.create()
GCS_WGS_1984 = sr
#GCS_WGS_1984 = arcpy.SpatialReference(r"WGS 1984")
wbsr = arcpy.SpatialReference()
wbsr.factoryCode = 3857
wbsr.create()
webMercator = wbsr
#webMercator = arcpy.SpatialReference(r"WGS 1984 Web Mercator (Auxiliary Sphere)")
env.overwriteOutput = True
scratch = env.scratchWorkspace
#Project doesn't like in_memory featureclasses, copy to scratch
copyInFeatures = os.path.join(scratch,"copyInFeatures")
arcpy.CopyFeatures_management(inFeature,copyInFeatures)
deleteme.append(copyInFeatures)
prjInFeature = os.path.join(scratch,"prjInFeature")
srInputPoints = arcpy.Describe(copyInFeatures).spatialReference
arcpy.AddMessage("Projecting input points to Web Mercator ...")
arcpy.Project_management(copyInFeatures,prjInFeature,webMercator)
deleteme.append(prjInFeature)
tempFans = os.path.join(scratch,"tempFans")
# put bearing into 0 - 360 range
geoBearing = math.fmod(geoBearing,360.0)
if debug == True: arcpy.AddMessage("geoBearing: " + str(geoBearing))
arithmeticBearing = Geo2Arithmetic(geoBearing) # need to convert from geographic angles (zero north clockwise) to arithmetic (zero east counterclockwise)
if debug == True: arcpy.AddMessage("arithmeticBearing: " + str(arithmeticBearing))
if traversal == 0.0:
traversal = 1.0 # modify so there is at least 1 degree of angle.
arcpy.AddWarning("Traversal is zero! Forcing traversal to 1.0 degrees.")
leftAngle = arithmeticBearing + (traversal / 2.0) # get left angle (arithmetic)
leftBearing = geoBearing - (traversal / 2.0) # get left bearing (geographic)
if leftBearing < 0.0: leftBearing = 360.0 + leftBearing
rightAngle = arithmeticBearing - (traversal / 2.0) # get right angle (arithmetic)
rightBearing = geoBearing + (traversal / 2.0) # get right bearing (geographic)
if rightBearing < 0.0: rightBearing = 360.0 + rightBearing
if debug == True: arcpy.AddMessage("arithemtic left/right: " + str(leftAngle) + "/" + str(rightAngle))
if debug == True: arcpy.AddMessage("geo left/right: " + str(leftBearing) + "/" + str(rightBearing))
centerPoints = []
arcpy.AddMessage("Getting centers ....")
shapefieldname = arcpy.Describe(prjInFeature).ShapeFieldName
rows = arcpy.SearchCursor(prjInFeature)
for row in rows:
feat = row.getValue(shapefieldname)
pnt = feat.getPart()
centerPointX = pnt.X
centerPointY = pnt.Y
centerPoints.append([centerPointX,centerPointY])
del row
del rows
paths = []
arcpy.AddMessage("Creating paths ...")
for centerPoint in centerPoints:
path = []
centerPointX = centerPoint[0]
centerPointY = centerPoint[1]
path.append([centerPointX,centerPointY]) # add first point
step = -1.0 # step in degrees
rightAngleRelativeToLeft = leftAngle - traversal - 1
#for d in xrange(int(leftAngle),int(rightAngleRelativeToLeft),int(step)): #UPDATE
for d in range(int(leftAngle),int(rightAngleRelativeToLeft),int(step)):
x = centerPointX + (maxRange * math.cos(math.radians(d)))
y = centerPointY + (maxRange * math.sin(math.radians(d)))
path.append([x,y])
if debug == True: arcpy.AddMessage("d,x,y: " + str(d) + "," + str(x) + "," + str(y))
path.append([centerPointX,centerPointY]) # add last point
paths.append(path)
if debug == True: arcpy.AddMessage("Points in path: " + str(len(path)))
if debug == True: arcpy.AddMessage("paths: " + str(paths))
arcpy.AddMessage("Creating target feature class ...")
arcpy.CreateFeatureclass_management(os.path.dirname(tempFans),os.path.basename(tempFans),"Polygon","#","DISABLED","DISABLED",webMercator)
arcpy.AddField_management(tempFans,"Range","DOUBLE","#","#","#","Range (meters)")
arcpy.AddField_management(tempFans,"Bearing","DOUBLE","#","#","#","Bearing (degrees)")
arcpy.AddField_management(tempFans,"Traversal","DOUBLE","#","#","#","Traversal (degrees)")
arcpy.AddField_management(tempFans,"LeftAz","DOUBLE","#","#","#","Left Bearing (degrees)")
arcpy.AddField_management(tempFans,"RightAz","DOUBLE","#","#","#","Right Bearing (degrees)")
arcpy.AddField_management(tempFans,"Model","TEXT","#","#","#","Weapon Model")
deleteme.append(tempFans)
arcpy.AddMessage("Building " + str(len(paths)) + " fans ...")
cur = arcpy.InsertCursor(tempFans)
for outPath in paths:
lineArray = arcpy.Array()
for vertex in outPath:
pnt = arcpy.Point()
pnt.X = vertex[0]
pnt.Y = vertex[1]
lineArray.add(pnt)
del pnt
feat = cur.newRow()
feat.shape = lineArray
feat.Range = maxRange
feat.Bearing = geoBearing
feat.Traversal = traversal
feat.LeftAz = leftBearing
feat.RightAz = rightBearing
feat.Model = str(weaponModel)
cur.insertRow(feat)
del lineArray
del feat
del cur
arcpy.AddMessage("Projecting Range Fans back to " + str(srInputPoints.name))
arcpy.Project_management(tempFans,outFeature,srInputPoints)
arcpy.SetParameter(8,outFeature)
except arcpy.ExecuteError:
# Get the tool error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
#print msgs #UPDATE
print(msgs)
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "\nArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# Print Python error messages for use in Python / Python Window
#print pymsg + "\n" #UPDATE
print(pymsg + "\n")
#print msgs #UPDATE
print(msgs)
finally:
# cleanup intermediate datasets
if debug == True: arcpy.AddMessage("Removing intermediate datasets...")
for i in deleteme:
if debug == True: arcpy.AddMessage("Removing: " + str(i))
arcpy.Delete_management(i)
if debug == True: arcpy.AddMessage("Done")
| apache-2.0 | -6,732,843,962,256,462,000 | 38.941704 | 157 | 0.629168 | false | 3.69892 | false | false | false |
alephdata/aleph | aleph/model/event.py | 1 | 3778 | from flask_babel import lazy_gettext
from aleph.model.role import Role
from aleph.model.alert import Alert
from aleph.model.entity import Entity
from aleph.model.entityset import EntitySet
from aleph.model.collection import Collection
from aleph.model.export import Export
class Event(object):
def __init__(self, title, template, params, link_to):
self.name = None
self.title = title
self.template = template
self.params = params
self.link_to = link_to
def to_dict(self):
return {
"name": self.name,
"title": self.title,
"template": self.template,
"params": {p: c.__name__.lower() for (p, c) in self.params.items()},
}
class EventsRegistry(type):
def __init__(cls, name, bases, dct):
cls.registry = {}
for ename, event in dct.items():
if isinstance(event, Event):
event.name = ename
cls.registry[ename] = event
super(EventsRegistry, cls).__init__(name, bases, dct)
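# Note on the registry pattern above (illustrative): the metaclass walks the class
# body of Events below, stamps every Event attribute with its attribute name and
# stores it in Events.registry. So, for example, Events.get("CREATE_COLLECTION")
# returns the Event defined further down and "GRANT_COLLECTION" appears in
# Events.names(); callers can look events up by name instead of importing each one.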
class Events(object, metaclass=EventsRegistry):
@classmethod
def get(cls, name):
return cls.registry.get(name)
@classmethod
def names(cls):
return list(cls.registry.keys())
# CREATE COLLECTION
CREATE_COLLECTION = Event(
title=lazy_gettext("New datasets"),
template=lazy_gettext("{{actor}} created {{collection}}"),
params={"collection": Collection},
link_to="collection",
)
# UPLOAD DOCUMENT
INGEST_DOCUMENT = Event(
title=lazy_gettext("Document uploads"),
template=lazy_gettext("{{actor}} added {{document}} to {{collection}}"),
params={"document": Entity, "collection": Collection},
link_to="document",
)
# EXECUTE MAPPING
LOAD_MAPPING = Event(
title=lazy_gettext("Entities generated"),
template=lazy_gettext(
"{{actor}} generated entities from {{table}} in {{collection}}"
),
params={"table": Entity, "collection": Collection},
link_to="table",
)
# CREATE DIAGRAM
CREATE_DIAGRAM = Event(
title=lazy_gettext("New network diagram"),
template=lazy_gettext(
"{{actor}} began diagramming {{diagram}} in {{collection}}"
),
params={"diagram": EntitySet, "collection": Collection},
link_to="table",
)
# CREATE ENTITYSET
CREATE_ENTITYSET = Event(
title=lazy_gettext("New diagrams and lists"),
template=lazy_gettext("{{actor}} created {{entityset}} in {{collection}}"),
params={"entityset": EntitySet, "collection": Collection},
link_to="table",
)
# ALERT MATCH
MATCH_ALERT = Event(
title=lazy_gettext("Alert notifications"),
template=lazy_gettext("{{entity}} matches your alert for {{alert}}"), # noqa
params={"entity": Entity, "alert": Alert, "role": Role},
link_to="entity",
)
# GRANT COLLECTION
GRANT_COLLECTION = Event(
title=lazy_gettext("Dataset access change"),
template=lazy_gettext(
"{{actor}} gave {{role}} access to {{collection}}"
), # noqa
params={"collection": Collection, "role": Role},
link_to="collection",
)
# PUBLISH COLLECTION
PUBLISH_COLLECTION = Event(
title=lazy_gettext("Dataset published"),
template=lazy_gettext("{{actor}} published {{collection}}"),
params={"collection": Collection},
link_to="collection",
)
# EXPORT PUBLISHED
COMPLETE_EXPORT = Event(
title=lazy_gettext("Exports completed"),
template=lazy_gettext("{{export}} is ready for download"),
params={"export": Export},
link_to="export",
)
| mit | -196,276,075,847,943,100 | 29.715447 | 85 | 0.5937 | false | 4.106522 | false | false | false |
katakumpo/nicedjango | nicedjango/utils/queries.py | 1 | 1448 | from operator import itemgetter
from nicedjango.utils.py import sliceable_as_chunks
from nicedjango.utils.py.chunk import as_chunks
from nicedjango.utils.py.iter import partition
from nicedjango.utils.py.operator import item_in
__all__ = ['partition_existing_pks', 'get_pks_queryset', 'queryset_as_chunks']
def partition_existing_pks(model, pk_index, values_list):
queryset = get_pks_queryset(model)
existing_pks = queryset_pk_in(queryset, map(itemgetter(pk_index), values_list))
return partition(item_in(pk_index, existing_pks), values_list)
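# Illustrative note: given rows such as [(1, 'a'), (2, 'b')] and pk_index = 0,
# partition_existing_pks checks which primary keys already exist for `model` and
# splits the rows accordingly (via the project's partition/item_in helpers), so a
# caller can route already-stored rows to updates and the remaining rows to inserts.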
def get_pks_queryset(model):
return model._default_manager.values_list(model._meta.pk.name, flat=True)
def queryset_pk_in(queryset, pks):
return queryset_in(queryset, queryset.model._meta.pk.name, pks)
def queryset_in(queryset, name, values):
filters = {'%s__in' % name: values}
return queryset.filter(**filters)
def queryset_in_list_getter(queryset, name):
def queryset_in_list_getter_(values):
return list(queryset_in(queryset, name, values))
return queryset_in_list_getter_
def queryset_as_chunks(queryset, chunksize=None, name=None, pks=None):
if name is not None and pks is not None:
values_getter = queryset_in_list_getter(queryset, name)
for chunk in as_chunks(pks, chunksize, None, values_getter):
yield chunk
else:
for chunk in sliceable_as_chunks(queryset, chunksize):
yield chunk
| mit | -5,210,880,107,361,234,000 | 32.674419 | 83 | 0.713398 | false | 3.480769 | false | false | false |
AWilliams17/WebLair | WebLair/settings.py | 1 | 3820 | """
Django settings for WebLair project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import confidential # because for some reason django mixes sensitive info with non-sensitive info. 10/10 framework.
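# The confidential module imported above is expected to provide the values used
# further down in this file -- roughly (illustrative sketch, the actual file is
# presumably kept out of version control):
#   SECRET_KEY = '...'
#   NAMEDB = '...'; USERDB = '...'; PASSWORDDB = '...'; HOSTDB = '...'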
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = confidential.SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [''] # Host here
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SECURE_SSL_REDIRECT = True
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_markup',
'MyWebLair',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'WebLair.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'WebLair.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': confidential.NAMEDB,
'USER': confidential.USERDB,
'PASSWORD': confidential.PASSWORDDB,
'HOST': confidential.HOSTDB,
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
'''
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
'''
STATIC_ROOT = "/home/AWilliams/WebLair/static"
STATIC_URL = '/static/'
| gpl-3.0 | -2,623,914,492,151,416,300 | 26.681159 | 116 | 0.694241 | false | 3.540315 | false | false | false |
cogniteev/docido-python-sdk | docido_sdk/toolbox/date_ext.py | 1 | 5758 |
from datetime import datetime
import re
import sys
from dateutil import parser
import pytz
import six
from . text import levenshtein
UTC_EPOCH = datetime(1970, 1, 1).replace(tzinfo=pytz.utc)
MAX_POSIX_TIMESTAMP = pow(2, 32) - 1
DAYS = {
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday'
}
DAYS_ABBR = [day[:3] for day in DAYS]
class timestamp_ms(object):
"""Build UTC timestamp in milliseconds
"""
TIMEZONE_PARENTHESIS = re.compile('(.*)\(([a-zA-Z]+)[-+0-9:.]*\)$')
TIMEZONE_SEPARATOR = re.compile('(.* .*)(\d\d)[.-](\d\d)$')
QUOTED_TIMEZONE = re.compile("""(.*)['"]([\w:+-]+)['"]?$""")
START_WITH_DAY_OF_WEEK = re.compile('^([a-zA-Z]*)[\s,](.*)')
@classmethod
def feeling_lucky(cls, obj):
"""Tries to convert given object to an UTC timestamp is ms, based
on its type.
"""
if isinstance(obj, six.string_types):
return cls.from_str(obj)
elif isinstance(obj, six.integer_types) and obj <= MAX_POSIX_TIMESTAMP:
return cls.from_posix_timestamp(obj)
elif isinstance(obj, datetime):
return cls.from_datetime(obj)
else:
raise ValueError(
u"Don't know how to get timestamp from '{}'".format(obj)
)
@classmethod
def fix_mispelled_day(cls, timestr):
"""fix mispelled day when written in english
:return: `None` if the day was not modified, the new date otherwise
"""
day_extraction = cls.START_WITH_DAY_OF_WEEK.match(timestr)
if day_extraction is not None:
day = day_extraction.group(1).lower()
if len(day) == 3:
dataset = DAYS_ABBR
else:
dataset = DAYS
if day not in dataset:
days = list(dataset)
days.sort(key=lambda e: levenshtein(day, e))
return days[0] + day_extraction.group(2)
@classmethod
def remove_parenthesis_around_tz(cls, timestr):
"""get rid of parenthesis around timezone: (GMT) => GMT
:return: the new string if parenthesis were found, `None` otherwise
"""
parenthesis = cls.TIMEZONE_PARENTHESIS.match(timestr)
if parenthesis is not None:
return parenthesis.group(1)
@classmethod
def remove_quotes_around_tz(cls, timestr):
"""Remove quotes (single and double) around timezone otherwise
`dateutil.parser.parse` raises
"""
quoted = cls.QUOTED_TIMEZONE.match(timestr)
if quoted is not None:
return quoted.group(1) + quoted.group(2)
@classmethod
def remove_timezone(cls, timestr):
"""Completely remove timezone information, if any.
:return: the new string if timezone was found, `None` otherwise
"""
if re.match(r".*[\-+]?\d{2}:\d{2}$", timestr):
return re.sub(
r"(.*)(\s[\+-]?\d\d:\d\d)$",
r"\1",
timestr
)
@classmethod
def fix_timezone_separator(cls, timestr):
"""Replace invalid timezone separator to prevent
`dateutil.parser.parse` to raise.
:return: the new string if invalid separators were found,
`None` otherwise
"""
tz_sep = cls.TIMEZONE_SEPARATOR.match(timestr)
if tz_sep is not None:
return tz_sep.group(1) + tz_sep.group(2) + ':' + tz_sep.group(3)
return timestr
@classmethod
def from_str(cls, timestr, shaked=False):
"""Use `dateutil` module to parse the give string
:param basestring timestr: string representing a date to parse
:param bool shaked: whether the input parameter been already
cleaned or not.
"""
orig = timestr
if not shaked:
timestr = cls.fix_timezone_separator(timestr)
try:
date = parser.parse(timestr)
except ValueError:
if not shaked:
shaked = False
for shaker in [
cls.fix_mispelled_day,
cls.remove_parenthesis_around_tz,
cls.remove_quotes_around_tz]:
new_timestr = shaker(timestr)
if new_timestr is not None:
timestr = new_timestr
shaked = True
if shaked:
try:
return cls.from_str(timestr, shaked=True)
except ValueError:
# raise ValueError below with proper message
pass
msg = u"Unknown string format: {!r}".format(orig)
raise ValueError(msg), None, sys.exc_info()[2]
else:
try:
return cls.from_datetime(date)
except ValueError:
new_str = cls.remove_timezone(orig)
if new_str is not None:
return cls.from_str(new_str)
else:
raise
@classmethod
def from_ymd(cls, year, month=1, day=1):
return cls.from_datetime(datetime(
year=year, month=month, day=day
))
@classmethod
def from_posix_timestamp(cls, ts):
return cls.from_datetime(datetime.utcfromtimestamp(ts))
@classmethod
def from_datetime(cls, date):
if date.tzinfo is None:
date = date.replace(tzinfo=pytz.utc)
seconds = (date - UTC_EPOCH).total_seconds() * 1e3
micro_seconds = date.microsecond / 1e3
return int(seconds + micro_seconds)
@classmethod
def now(cls):
return cls.from_datetime(datetime.utcnow())
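# A few worked conversions (illustrative; values follow from the code above, with the
# UTC epoch at 1970-01-01T00:00:00Z):
#   timestamp_ms.from_ymd(1970)                         -> 0
#   timestamp_ms.from_posix_timestamp(1)                -> 1000  (seconds to ms)
#   timestamp_ms.feeling_lucky("1970-01-01T00:00:01Z")  -> 1000  (parsed via dateutil)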
| apache-2.0 | -3,380,683,506,579,261,000 | 31.348315 | 79 | 0.547586 | false | 3.984775 | false | false | false |
allo-/django-bingo-tweet | setup.py | 1 | 1212 | import os
import sys
from setuptools import setup
from setuptools.command.install_lib import install_lib as _install_lib
with open('requirements.txt') as f:
required = f.read().splitlines()
class install_lib(_install_lib):
def run(self):
from django.core.management.commands.compilemessages \
import compile_messages
os.chdir('bingo_tweets')
compile_messages(sys.stderr)
os.chdir("..")
setup(name='django-bingo-tweets',
description='Bingo Tweets',
long_description='Tweet if a new game '
'is created in a django-bingo instance',
author='Alexander Schier',
author_email='[email protected]',
version='1.1.0',
url='https://github.com/allo-/django-bingo-tweet',
packages=['bingo_tweets'],
package_data={'bingo_tweets': ['locale/*/LC_MESSAGES/*.*']},
include_package_data=True,
install_requires=required,
classifiers=[
'Framework :: Django',
'Topic :: Games/Entertainment :: Board Games',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python'
]
)
| agpl-3.0 | 3,360,659,025,379,820,500 | 30.894737 | 76 | 0.629538 | false | 3.823344 | false | false | false |
total-impact/total-impact-core | test/unit_tests/test_backend.py | 2 | 18627 | import json, os, Queue, datetime
from totalimpact import tiredis, backend, default_settings
from totalimpact import db, app
from totalimpact import item as item_module
from totalimpact.providers.provider import Provider, ProviderTimeout, ProviderFactory
from totalimpact import REDIS_UNITTEST_DATABASE_NUMBER
from nose.tools import raises, assert_equals, nottest
from test.utils import slow
from test import mocks
from test.utils import setup_postgres_for_unittests, teardown_postgres_for_unittests
class TestBackend():
def setUp(self):
self.config = None #placeholder
self.TEST_PROVIDER_CONFIG = [
("wikipedia", {})
]
self.d = None
# do the same thing for the redis db, set up the test redis database. We're using DB Number 8
self.r = tiredis.from_url("redis://localhost:6379", db=REDIS_UNITTEST_DATABASE_NUMBER)
self.r.flushdb()
provider_queues = {}
providers = ProviderFactory.get_providers(self.TEST_PROVIDER_CONFIG)
for provider in providers:
provider_queues[provider.provider_name] = backend.PythonQueue(provider.provider_name+"_queue")
self.b = backend.Backend(
backend.RedisQueue("alias-unittest", self.r),
provider_queues,
[backend.PythonQueue("couch_queue")],
self.r)
self.fake_item = {
"_id": "1",
"type": "item",
"num_providers_still_updating":1,
"aliases":{"pmid":["111"]},
"biblio": {},
"metrics": {},
"last_modified": datetime.datetime(2013, 1, 1)
}
self.fake_aliases_dict = {"pmid":["222"]}
self.tiid = "abcd"
self.db = setup_postgres_for_unittests(db, app)
def teardown(self):
self.r.flushdb()
teardown_postgres_for_unittests(self.db)
class TestProviderWorker(TestBackend):
# warning: calls live provider right now
def test_add_to_couch_queue_if_nonzero(self):
test_couch_queue = backend.PythonQueue("test_couch_queue")
provider_worker = backend.ProviderWorker(mocks.ProviderMock("myfakeprovider"),
None, None, None, {"a": test_couch_queue}, None, self.r)
response = provider_worker.add_to_couch_queue_if_nonzero("aaatiid", #start fake tiid with "a" so in first couch queue
{"doi":["10.5061/dryad.3td2f"]},
"aliases",
"dummy")
# test that it put it on the queue
in_queue = test_couch_queue.pop()
expected = {'method_name': 'aliases', 'tiid': 'aaatiid', 'provider_name': 'myfakeprovider', 'analytics_credentials': 'dummy', 'new_content': {'doi': ['10.5061/dryad.3td2f']}}
assert_equals(in_queue, expected)
def test_add_to_couch_queue_if_nonzero_given_metrics(self):
test_couch_queue = backend.PythonQueue("test_couch_queue")
provider_worker = backend.ProviderWorker(mocks.ProviderMock("myfakeprovider"),
None, None, None, {"a": test_couch_queue}, None, self.r)
metrics_method_response = {'dryad:package_views': (361, 'http://dx.doi.org/10.5061/dryad.7898'),
'dryad:total_downloads': (176, 'http://dx.doi.org/10.5061/dryad.7898'),
'dryad:most_downloaded_file': (65, 'http://dx.doi.org/10.5061/dryad.7898')}
response = provider_worker.add_to_couch_queue_if_nonzero("aaatiid", #start fake tiid with "a" so in first couch queue
metrics_method_response,
"metrics",
"dummy")
# test that it put it on the queue
in_queue = test_couch_queue.pop()
expected = {'method_name': 'metrics', 'tiid': 'aaatiid', 'provider_name': 'myfakeprovider', 'analytics_credentials': 'dummy', 'new_content': metrics_method_response}
print in_queue
assert_equals(in_queue, expected)
# check nothing in redis since it had a value
response = self.r.get_num_providers_currently_updating("aaatiid")
assert_equals(response, 0)
def test_add_to_couch_queue_if_nonzero_given_empty_metrics_response(self):
test_couch_queue = backend.PythonQueue("test_couch_queue")
provider_worker = backend.ProviderWorker(mocks.ProviderMock("myfakeprovider"),
None, None, None, {"a": test_couch_queue}, None, self.r)
metrics_method_response = {}
response = provider_worker.add_to_couch_queue_if_nonzero("aaatiid", #start fake tiid with "a" so in first couch queue
metrics_method_response,
"metrics",
"dummy")
# test that it did not put it on the queue
in_queue = test_couch_queue.pop()
expected = None
assert_equals(in_queue, expected)
# check decremented in redis since the payload was null
        response = self.r.get_num_providers_currently_updating("aaatiid")
assert_equals(response, 0)
def test_wrapper(self):
def fake_callback(tiid, new_content, method_name, analytics_credentials, aliases_providers_run):
pass
response = backend.ProviderWorker.wrapper("123",
{'url': ['http://somewhere'], 'doi': ['10.123']},
mocks.ProviderMock("myfakeprovider"),
"aliases",
{}, # credentials
[], # aliases previously run
fake_callback)
print response
expected = {'url': ['http://somewhere'], 'doi': ['10.1', '10.123']}
assert_equals(response, expected)
class TestCouchWorker(TestBackend):
def test_update_item_with_new_aliases(self):
response = backend.CouchWorker.update_item_with_new_aliases(self.fake_aliases_dict, self.fake_item)
expected = {'metrics': {}, 'num_providers_still_updating': 1, 'biblio': {}, '_id': '1', 'type': 'item',
'aliases': {'pmid': ['222', '111']}, 'last_modified': datetime.datetime(2013, 1, 1, 0, 0)}
assert_equals(response, expected)
def test_update_item_with_new_aliases_using_dup_alias(self):
dup_alias_dict = self.fake_item["aliases"]
response = backend.CouchWorker.update_item_with_new_aliases(dup_alias_dict, self.fake_item)
expected = None # don't return the item if it already has all the aliases in it
assert_equals(response, expected)
def test_update_item_with_new_biblio(self):
new_biblio_dict = {"title":"A very good paper", "authors":"Smith, Lee, Khun"}
response = backend.CouchWorker.update_item_with_new_biblio(new_biblio_dict, self.fake_item)
expected = new_biblio_dict
assert_equals(response["biblio"], expected)
def test_update_item_with_new_biblio_existing_biblio(self):
item_with_some_biblio = self.fake_item
item_with_some_biblio["biblio"] = {"title":"Different title"}
new_biblio_dict = {"title":"A very good paper", "authors":"Smith, Lee, Khun"}
response = backend.CouchWorker.update_item_with_new_biblio(new_biblio_dict, item_with_some_biblio)
expected = {"authors": new_biblio_dict["authors"]}
assert_equals(response["biblio"], expected)
def test_update_item_with_new_metrics(self):
response = backend.CouchWorker.update_item_with_new_metrics("mendeley:groups", (3, "http://provenance"), self.fake_item)
expected = {'mendeley:groups': {'provenance_url': 'http://provenance', 'values': {'raw': 3, 'raw_history': {'2012-09-15T21:39:39.563710': 3}}}}
print response["metrics"]
assert_equals(response["metrics"]['mendeley:groups']["provenance_url"], 'http://provenance')
assert_equals(response["metrics"]['mendeley:groups']["values"]["raw"], 3)
assert_equals(response["metrics"]['mendeley:groups']["values"]["raw_history"].values(), [3])
# check year starts with 20
assert_equals(response["metrics"]['mendeley:groups']["values"]["raw_history"].keys()[0][0:2], "20")
def test_run_nothing_in_queue(self):
test_couch_queue = backend.PythonQueue("test_couch_queue")
couch_worker = backend.CouchWorker(test_couch_queue, self.r, self.d)
response = couch_worker.run()
expected = None
assert_equals(response, expected)
def test_run_aliases_in_queue(self):
test_couch_queue = backend.PythonQueue("test_couch_queue")
test_couch_queue_dict = {self.fake_item["_id"][0]:test_couch_queue}
provider_worker = backend.ProviderWorker(mocks.ProviderMock("myfakeprovider"),
None, None, None, test_couch_queue_dict, None, self.r)
response = provider_worker.add_to_couch_queue_if_nonzero(self.fake_item["_id"],
{"doi":["10.5061/dryad.3td2f"]},
"aliases",
"dummy")
# save basic item beforehand
item_obj = item_module.create_objects_from_item_doc(self.fake_item)
self.db.session.add(item_obj)
self.db.session.commit()
# run
couch_worker = backend.CouchWorker(test_couch_queue, self.r, self.d)
response = couch_worker.run()
expected = None
assert_equals(response, expected)
# check couch_queue has value after
response = item_module.get_item(self.fake_item["_id"], {}, self.r)
print response
expected = {'pmid': ['111'], 'doi': ['10.5061/dryad.3td2f']}
assert_equals(response["aliases"], expected)
# check has updated last_modified time
now = datetime.datetime.utcnow().isoformat()
assert_equals(response["last_modified"][0:10], now[0:10])
def test_run_metrics_in_queue(self):
test_couch_queue = backend.PythonQueue("test_couch_queue")
test_couch_queue_dict = {self.fake_item["_id"][0]:test_couch_queue}
provider_worker = backend.ProviderWorker(mocks.ProviderMock("myfakeprovider"),
None, None, None, test_couch_queue_dict, None, self.r)
metrics_method_response = {'dryad:package_views': (361, 'http://dx.doi.org/10.5061/dryad.7898'),
'dryad:total_downloads': (176, 'http://dx.doi.org/10.5061/dryad.7898'),
'dryad:most_downloaded_file': (65, 'http://dx.doi.org/10.5061/dryad.7898')}
response = provider_worker.add_to_couch_queue_if_nonzero(self.fake_item["_id"],
metrics_method_response,
"metrics",
"dummy")
# save basic item beforehand
item_obj = item_module.create_objects_from_item_doc(self.fake_item)
self.db.session.add(item_obj)
self.db.session.commit()
# run
couch_worker = backend.CouchWorker(test_couch_queue, self.r, self.d)
couch_worker.run()
# check couch_queue has value after
response = item_module.get_item(self.fake_item["_id"], {}, self.r)
print response
expected = 361
assert_equals(response["metrics"]['dryad:package_views']['values']["raw"], expected)
class TestBackendClass(TestBackend):
def test_decide_who_to_call_next_unknown(self):
aliases_dict = {"unknownnamespace":["111"]}
prev_aliases = []
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect blanks
expected = {'metrics': [], 'biblio': [], 'aliases': ['webpage']}
assert_equals(response, expected)
def test_decide_who_to_call_next_unknown_after_webpage(self):
aliases_dict = {"unknownnamespace":["111"]}
prev_aliases = ["webpage"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect blanks
expected = {'metrics': ["wikipedia"], 'biblio': ["webpage"], 'aliases': []}
assert_equals(response, expected)
def test_decide_who_to_call_next_webpage_no_title(self):
aliases_dict = {"url":["http://a"]}
prev_aliases = []
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect all metrics and lookup the biblio
expected = {'metrics': ['wikipedia'], 'biblio': ['webpage'], 'aliases': []}
assert_equals(response, expected)
def test_decide_who_to_call_next_webpage_with_title(self):
aliases_dict = {"url":["http://a"], "title":["A Great Paper"]}
prev_aliases = []
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect all metrics, no need to look up biblio
expected = {'metrics': ['wikipedia'], 'biblio': ['webpage'], 'aliases': []}
assert_equals(response, expected)
def test_decide_who_to_call_next_slideshare_no_title(self):
aliases_dict = {"url":["http://abc.slideshare.net/def"]}
prev_aliases = []
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect all metrics and look up the biblio
expected = {'metrics': ['wikipedia'], 'biblio': ['slideshare'], 'aliases': []}
assert_equals(response, expected)
def test_decide_who_to_call_next_dryad_no_url(self):
aliases_dict = {"doi":["10.5061/dryad.3td2f"]}
prev_aliases = ["altmetric_com"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect need to resolve the dryad doi before can go get metrics
expected = {'metrics': [], 'biblio': [], 'aliases': ['dryad']}
assert_equals(response, expected)
def test_decide_who_to_call_next_dryad_with_url(self):
aliases_dict = { "doi":["10.5061/dryad.3td2f"],
"url":["http://dryadsomewhere"]}
prev_aliases = ["altmetric_com"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# still need the dx.doi.org url
expected = {'metrics': [], 'biblio': [], 'aliases': ['dryad']}
assert_equals(response, expected)
def test_decide_who_to_call_next_dryad_with_doi_url(self):
aliases_dict = { "doi":["10.5061/dryad.3td2f"],
"url":["http://dx.doi.org/10.dryadsomewhere"]}
prev_aliases = ["altmetric_com", "dryad"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# have url so now can go get all the metrics
expected = {'metrics': ['wikipedia'], 'biblio': ['dryad', 'mendeley'], 'aliases': []}
assert_equals(response, expected)
def test_decide_who_to_call_next_crossref_not_run(self):
aliases_dict = {"pmid":["111"]}
prev_aliases = ["mendeley"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect need to get more aliases
expected = {'metrics': [], 'biblio': [], 'aliases': ['crossref']}
assert_equals(response, expected)
def test_decide_who_to_call_next_pmid_mendeley_not_run(self):
aliases_dict = {"pmid":["111"]}
prev_aliases = [""]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect need to get more aliases
expected = {'metrics': [], 'biblio': [], 'aliases': ['mendeley']}
assert_equals(response, expected)
def test_decide_who_to_call_next_pmid_prev_run(self):
aliases_dict = { "pmid":["1111"],
"url":["http://pubmedsomewhere"]}
prev_aliases = ["pubmed", "mendeley"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect need to get metrics and biblio
expected = {'metrics': [], 'biblio': [], 'aliases': ['crossref']}
assert_equals(response, expected)
def test_decide_who_to_call_next_doi_with_urls(self):
aliases_dict = { "doi":["10.234/345345"],
"url":["http://journalsomewhere"]}
prev_aliases = ["pubmed", "mendeley"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect need to get metrics, biblio from crossref
expected = {'metrics': [], 'biblio': [], 'aliases': ['crossref']}
assert_equals(response, expected)
def test_decide_who_to_call_next_doi_crossref_prev_called(self):
aliases_dict = { "doi":["10.234/345345"],
"url":["http://journalsomewhere"]}
prev_aliases = ["crossref"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect need to get metrics, no biblio
expected = {'metrics': [], 'biblio': [], 'aliases': ['mendeley']}
assert_equals(response, expected)
def test_decide_who_to_call_next_doi_crossref_pubmed_mendeley_prev_called(self):
aliases_dict = { "doi":["10.234/345345"],
"url":["http://journalsomewhere"]}
prev_aliases = ["crossref", "pubmed", "mendeley", "altmetric_com"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect need to get metrics, no biblio
expected = {'metrics': ['wikipedia'], 'biblio': ['crossref', 'pubmed', 'mendeley', 'webpage'], 'aliases': []}
assert_equals(response, expected)
def test_decide_who_to_call_next_pmid_crossref_pubmed_prev_called(self):
aliases_dict = { "pmid":["1111"],
"url":["http://journalsomewhere"]}
prev_aliases = ["crossref", "pubmed", "mendeley", "altmetric_com"]
response = backend.Backend.sniffer(aliases_dict, prev_aliases, self.TEST_PROVIDER_CONFIG)
print response
# expect need to get metrics, no biblio
expected = {'metrics': ['wikipedia'], 'biblio': ['crossref', 'pubmed', 'mendeley', 'webpage'], 'aliases': []}
assert_equals(response, expected)
| mit | -208,706,792,003,346,430 | 47.76178 | 182 | 0.603801 | false | 3.6951 | true | false | false |
gotlium/django-geoip-redis | geoip/migrations/0001_initial.py | 1 | 8484 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Country'
db.create_table('geoip_country', (
('code', self.gf('django.db.models.fields.CharField')(max_length=2, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal('geoip', ['Country'])
# Adding model 'Area'
db.create_table('geoip_area', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geoip.Country'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('geoip', ['Area'])
# Adding unique constraint on 'Area', fields ['country', 'name']
db.create_unique('geoip_area', ['country_id', 'name'])
# Adding model 'City'
db.create_table('geoip_city', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geoip.Area'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=6, blank=True)),
('longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=6, blank=True)),
))
db.send_create_signal('geoip', ['City'])
# Adding unique constraint on 'City', fields ['area', 'name']
db.create_unique('geoip_city', ['area_id', 'name'])
# Adding model 'ISP'
db.create_table('geoip_isp', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geoip.Country'])),
))
db.send_create_signal('geoip', ['ISP'])
# Adding unique constraint on 'ISP', fields ['country', 'name']
db.create_unique('geoip_isp', ['country_id', 'name'])
# Adding model 'Provider'
db.create_table('geoip_provider', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('ranges', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('geoip', ['Provider'])
# Adding M2M table for field isp on 'Provider'
db.create_table('geoip_provider_isp', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('provider', models.ForeignKey(orm['geoip.provider'], null=False)),
('isp', models.ForeignKey(orm['geoip.isp'], null=False))
))
db.create_unique('geoip_provider_isp', ['provider_id', 'isp_id'])
# Adding model 'Range'
db.create_table('geoip_range', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('start_ip', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)),
('end_ip', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geoip.Country'])),
('area', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geoip.Area'], null=True)),
('city', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geoip.City'], null=True)),
('isp', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geoip.ISP'], null=True)),
('provider', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geoip.Provider'], null=True)),
))
db.send_create_signal('geoip', ['Range'])
def backwards(self, orm):
# Removing unique constraint on 'ISP', fields ['country', 'name']
db.delete_unique('geoip_isp', ['country_id', 'name'])
# Removing unique constraint on 'City', fields ['area', 'name']
db.delete_unique('geoip_city', ['area_id', 'name'])
# Removing unique constraint on 'Area', fields ['country', 'name']
db.delete_unique('geoip_area', ['country_id', 'name'])
# Deleting model 'Country'
db.delete_table('geoip_country')
# Deleting model 'Area'
db.delete_table('geoip_area')
# Deleting model 'City'
db.delete_table('geoip_city')
# Deleting model 'ISP'
db.delete_table('geoip_isp')
# Deleting model 'Provider'
db.delete_table('geoip_provider')
# Removing M2M table for field isp on 'Provider'
db.delete_table('geoip_provider_isp')
# Deleting model 'Range'
db.delete_table('geoip_range')
models = {
'geoip.area': {
'Meta': {'unique_together': "(('country', 'name'),)", 'object_name': 'Area'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geoip.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'geoip.city': {
'Meta': {'unique_together': "(('area', 'name'),)", 'object_name': 'City'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geoip.Area']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '6', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '6', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'geoip.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'geoip.isp': {
'Meta': {'unique_together': "(('country', 'name'),)", 'object_name': 'ISP'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geoip.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'geoip.provider': {
'Meta': {'object_name': 'Provider'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isp': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['geoip.ISP']", 'symmetrical': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'ranges': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'geoip.range': {
'Meta': {'object_name': 'Range'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geoip.Area']", 'null': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geoip.City']", 'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geoip.Country']"}),
'end_ip': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geoip.ISP']", 'null': 'True'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geoip.Provider']", 'null': 'True'}),
'start_ip': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['geoip'] | gpl-3.0 | -467,225,565,146,595,000 | 51.701863 | 147 | 0.565535 | false | 3.653747 | false | false | false |
neweagle/ns-3_multisim | pybindgen/wutils.py | 2 | 3389 | import os.path
import sys
import subprocess
import re
def _get_version_from_bzr_lib(path):
import bzrlib.tag, bzrlib.branch
fullpath = os.path.abspath(path)
if sys.platform == 'win32':
fullpath = fullpath.replace('\\', '/')
fullpath = '/' + fullpath
branch = bzrlib.branch.Branch.open('file://' + fullpath)
tags = bzrlib.tag.BasicTags(branch)
#print "Getting version information from bzr branch..."
branch.lock_read()
try:
history = branch.iter_merge_sorted_revisions(direction="reverse")
version = None
extra_version = []
for revid, depth, revno, end_of_merge in history:
for tag_name, tag_revid in tags.get_tag_dict().iteritems():
#print tag_revid, "<==>", revid
if tag_revid == revid:
#print "%s matches tag %s" % (revid, tag_name)
version = [int(s) for s in tag_name.split('.')]
## if the current revision does not match the last
## tag, we append current revno to the version
if tag_revid != branch.last_revision():
extra_version = [branch.revno()]
break
if version:
break
finally:
branch.unlock()
assert version is not None
_version = version + extra_version
return _version
def _get_version_from_bzr_command(path):
# get most recent tag first
most_recent_tag = None
proc = subprocess.Popen(['bzr', 'log', '--short'], stdout=subprocess.PIPE)
reg = re.compile('{([0-9]+)\.([0-9]+)\.([0-9]+)}')
for line in proc.stdout:
result = reg.search(line)
if result is not None:
most_recent_tag = [int(result.group(1)), int(result.group(2)), int(result.group(3))]
break
proc.stdout.close()
proc.wait()
assert most_recent_tag is not None
# get most recent revno
most_recent_revno = None
proc = subprocess.Popen(['bzr', 'revno'], stdout=subprocess.PIPE)
most_recent_revno = int(proc.stdout.read().strip())
proc.wait()
version = most_recent_tag + [most_recent_revno]
return version
_version = None
def get_version_from_bzr(path):
global _version
if _version is not None:
return _version
try:
import bzrlib.tag, bzrlib.branch
except ImportError:
return _get_version_from_bzr_command(path)
else:
return _get_version_from_bzr_lib(path)
def get_version(path=None):
if path is None:
path = os.path.dirname(__file__)
try:
return '.'.join([str(x) for x in get_version_from_bzr(path)])
except ImportError:
return 'unknown'
def generate_version_py(force=False, path=None):
"""generates pybindgen/version.py, unless it already exists"""
filename = os.path.join('pybindgen', 'version.py')
if not force and os.path.exists(filename):
return
if path is None:
path = os.path.dirname(__file__)
version = get_version_from_bzr(path)
dest = open(filename, 'w')
if isinstance(version, list):
dest.write('__version__ = %r\n' % (version,))
dest.write('"""[major, minor, micro, revno], '
'revno omitted in official releases"""\n')
else:
dest.write('__version__ = "%s"\n' % (version,))
dest.close()
| gpl-2.0 | 1,645,114,885,946,724,600 | 32.22549 | 96 | 0.581292 | false | 3.695747 | false | false | false |
nerzhul/Z-Eye | service/Collectors/zNetdisco.py | 1 | 3330 | # -*- Coding: utf-8 -*-
"""
* Copyright (C) 2010-2014 Loic BLOT, CNRS <http://www.unix-experience.fr/>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, re, time, thread, subprocess
import datetime
from pyPgSQL import PgSQL
import ZEyeUtil, zConfig
from DatabaseManager import ZEyeSQLMgr
class NetdiscoDataRefresher(ZEyeUtil.Thread):
def __init__(self):
""" 15 min between two netdisco updates """
self.sleepingTimer = 900
self.myName = "Netdisco Data Refresher"
ZEyeUtil.Thread.__init__(self)
def run(self):
self.launchMsg()
while True:
self.setRunning(True)
starttime = datetime.datetime.now()
self.launchRefresh()
"""
			Because netdisco can be slow, modify the sleeping timer to
			refresh data faster
			"""
			totaltime = (datetime.datetime.now() - starttime).total_seconds()
			"""
			If runtime exceeds 10 mins, sleeping timer is 15 min - totaltime
			But if there is less than 1 minute interval, let 1 min interval
			"""
			if totaltime > 600:
				self.sleepingTimer = 900 - totaltime
if self.sleepingTimer < 60:
self.sleepingTimer = 60
self.setRunning(False)
def launchRefresh(self):
try:
cmd = "/usr/bin/perl /usr/local/bin/netdisco -C /usr/local/etc/netdisco/netdisco.conf -R"
subprocess.check_output(cmd,shell=True)
self.logInfo("Refresh OK, now nbtwalk")
cmd = "/usr/bin/perl /usr/local/bin/netdisco -C /usr/local/etc/netdisco/netdisco.conf -w" % device
subprocess.check_output(cmd,shell=True)
self.logInfo("nbtwalk OK, now macwalk")
cmd = "/usr/bin/perl /usr/local/bin/netdisco -C /usr/local/etc/netdisco/netdisco.conf -m" % device
subprocess.check_output(cmd,shell=True)
self.logInfo("macwalk OK, now arpwalk")
cmd = "/usr/bin/perl /usr/local/bin/netdisco -C /usr/local/etc/netdisco/netdisco.conf -a" % device
subprocess.check_output(cmd,shell=True)
except Exception, e:
self.logCritical(e)
sys.exit(1);
class NetdiscoDataCleanup(ZEyeUtil.Thread):
def __init__(self):
""" 15 mins between two netdisco cleanups """
self.sleepingTimer = 900
self.myName = "Netdisco Data Cleanup"
ZEyeUtil.Thread.__init__(self)
def run(self):
self.launchMsg()
while True:
self.setRunning(True)
self.launchCleanup()
self.setRunning(False)
def launchCleanup(self):
try:
self.pgcon = PgSQL.connect(host=zConfig.pgHost,user=zConfig.pgUser,password=zConfig.pgPwd,database=zConfig.pgDB)
self.pgcursor = self.pgcon.cursor()
self.pgcursor.execute("DELETE FROM z_eye_switch_port_prises WHERE (ip,port) NOT IN (select host(ip),port from device_port)")
self.pgcon.commit()
except Exception, e:
self.logCritical(e)
sys.exit(1);
| gpl-2.0 | 6,357,073,503,739,357,000 | 30.714286 | 127 | 0.708408 | false | 3.1209 | false | false | false |
halfak/python-para | setup.py | 1 | 1232 | import os
from distutils.core import setup
from setuptools import find_packages
about_path = os.path.join(os.path.dirname(__file__), "para/about.py")
exec(compile(open(about_path).read(), about_path, "exec"))
def requirements(fname):
return [line.strip()
for line in open(os.path.join(os.path.dirname(__file__), fname))]
setup(
name=__name__, # noqa
version=__version__, # noqa
author=__author__, # noqa
author_email=__author_email__, # noqa
description=__description__, # noqa
url=__url__, # noqa
license=__license__, # noqa
packages=find_packages(),
scripts=[],
long_description=open("README.md").read(),
install_requires=[],
test_suite="nose.collector",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
"Topic :: Scientific/Engineering"
],
)
| mit | 6,124,903,700,830,610,000 | 29.8 | 77 | 0.607955 | false | 4.134228 | false | false | false |
skorokithakis/django-brake | brake/decorators.py | 2 | 2649 | import re
from functools import wraps
from django.conf import settings
from django.http import HttpResponse
class HttpResponseTooManyRequests(HttpResponse):
status_code = getattr(settings, 'RATELIMIT_STATUS_CODE', 403)
def _method_match(request, method=None):
if method is None:
method = ['GET', 'POST', 'PUT', 'DELETE', 'HEAD']
if not isinstance(method, list):
method = [method]
return request.method in method
_PERIODS = {
's': 1,
'm': 60,
'h': 60 * 60,
'd': 24 * 60 * 60,
}
rate_re = re.compile('([\d]+)/([\d]*)([smhd])')
def _split_rate(rate):
count, multi, period = rate_re.match(rate).groups()
count = int(count)
time = _PERIODS[period.lower()]
if multi:
time = time * int(multi)
return count, time
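# Examples of the rate grammar handled above (illustrative): "count/[multi]period"
# with period one of s, m, h, d.
#   _split_rate('5/m')    -> (5, 60)      five requests per minute
#   _split_rate('10/s')   -> (10, 1)
#   _split_rate('100/2h') -> (100, 7200)  one hundred requests per two hours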
def get_class_by_path(path):
mod = __import__('.'.join(path.split('.')[:-1]))
components = path.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
# Allows you to override the CacheBackend in your settings.py
_backend_class = getattr(
settings,
'RATELIMIT_CACHE_BACKEND',
'brake.backends.cachebe.CacheBackend'
)
_backend = get_class_by_path(_backend_class)()
def ratelimit(
ip=True, use_request_path=False, block=False, method=None, field=None, rate='5/m', increment=None
):
def decorator(fn):
count, period = _split_rate(rate)
@wraps(fn)
def _wrapped(request, *args, **kw):
if use_request_path:
func_name = request.path
else:
func_name = fn.__name__
response = None
if _method_match(request, method):
limits = _backend.limit(
func_name, request, ip, field, count, period
)
if limits:
if block:
response = HttpResponseTooManyRequests()
request.limited = True
request.limits = limits
if response is None:
# If the response isn't HttpResponseTooManyRequests already, run
# the actual function to get the result.
response = fn(request, *args, **kw)
if not isinstance(response, HttpResponseTooManyRequests):
if _method_match(request, method) and \
(increment is None or (callable(increment) and increment(
request, response
))):
_backend.count(func_name, request, ip, field, period)
return response
return _wrapped
return decorator
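# Minimal usage sketch (illustrative; the view name and rate below are assumptions,
# not part of this module):
#
#   @ratelimit(block=True, method='POST', field='username', rate='5/m')
#   def login(request):
#       ...
#
# With block=False the wrapped view still runs, but request.limited is set to True and
# request.limits carries the triggered limits, so the view can degrade gracefully
# instead of returning HttpResponseTooManyRequests.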
| bsd-3-clause | -6,449,535,197,222,197,000 | 27.483871 | 101 | 0.562099 | false | 4.069124 | false | false | false |
trong-nguyen/algorithms | leet/contest_74.py | 1 | 2834 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import sys
from utils.templates import fail_string
import bisect
class Solution(object):
def is_subsequence(self, word, lookup):
# print lookup
if not word:
return True
left = -1
for c in word:
if c not in lookup:
return False
i = bisect.bisect_right(lookup[c], left)
if i < len(lookup[c]):
left = lookup[c][i]
else:
return False
return True
def numMatchingSubseq(self, S, words):
"""
:type S: str
:type words: List[str]
:rtype: int
"""
lookup = {}
for i, c in enumerate(S):
lookup[c] = lookup.get(c, [])
lookup[c].append(i)
count = 0
for word in words:
if self.is_subsequence(word, lookup):
count += 1
return count
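# How the check above works (illustrative): `lookup` maps each character of S to the
# sorted list of its positions, e.g. S = "abcde" gives {'a': [0], 'b': [1], ...}.
# For every word, bisect_right finds the first occurrence of the next character that
# lies strictly after the previously matched position, so "ace" matches "abcde"
# (positions 0 -> 2 -> 4) while "aec" fails because no 'c' remains after position 4.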
class Solution2(object):
def validTicTacToe(self, board):
"""
:type board: List[str]
:rtype: bool
"""
num_x = 0
num_o = 0
for b in board:
for c in b:
if c == 'X':
num_x += 1
elif c == 'O':
num_o += 1
if not 0 <= num_x - num_o <= 1:
return False
if num_x < 3:
return True
rows = board
cols = [''.join([board[i][j] for i in range(3)]) for j in range(3)]
diags = [''.join([board[i][i] for i in range(3)]), ''.join([board[i][2-i] for i in range(3)])]
all_lines = rows + cols + diags
x_win = 'X'*3 in all_lines
o_win = 'O'*3 in all_lines
if x_win:
return num_x > num_o and not o_win
if o_win:
return num_x == num_o
return not (x_win and o_win)
def unit_test():
solution = Solution()
assert solution.numMatchingSubseq("dsahjpjauf", ["ahjpjau","ja","ahbwzgqnuk","tnmlanowax"]) == 2
assert solution.numMatchingSubseq("qlhxagxdqh", ["qlhxagxdq","qlhxagxdq","lhyiftwtut","yfzwraahab"]) == 2
assert solution.numMatchingSubseq("abcde", ["a", "bb", "acd", "ace"]) == 3
assert solution.numMatchingSubseq("abcdbcae", ["a", "bb", "acd", "ace", 'bb', 'bcbc']) == 6
solution = Solution2()
assert solution.validTicTacToe(["XXX","XOO","OO "]) == False
assert solution.validTicTacToe(["XOX","X O","X O"]) == True
assert solution.validTicTacToe(["O ", " ", " "]) == False
assert solution.validTicTacToe(["XOX", " X ", " "]) == False
assert solution.validTicTacToe(["XXX", " ", "OOO"]) == False
assert solution.validTicTacToe(["XOX", "O O", "XOX"]) == True
def test():
pass
if __name__ == '__main__':
unit_test()
test() | gpl-3.0 | -521,955,089,127,808,640 | 24.772727 | 109 | 0.498941 | false | 3.385902 | false | false | false |
Thortoise/Super-Snake | Blender/animation_nodes-master/nodes/mesh/generation/grid.py | 1 | 1992 | import bpy
from bpy.props import *
from mathutils import Vector
from .... base_types.node import AnimationNode
from .... algorithms.mesh_generation.indices_utils import GridMeshIndices
from .... algorithms.mesh_generation.basic_shapes import gridVertices
from .... events import executionCodeChanged
class GridMeshNode(bpy.types.Node, AnimationNode):
bl_idname = "an_GridMeshNode"
bl_label = "Grid Mesh"
bl_width_default = 160
centerGrid = BoolProperty(name = "Center", default = True, update = executionCodeChanged)
def create(self):
self.newInput("Integer", "X Divisions", "xDivisions", value = 5, minValue = 2)
self.newInput("Integer", "Y Divisions", "yDivisions", value = 5, minValue = 2)
self.newInput("Float", "X Distance", "xDistance", value = 1)
self.newInput("Float", "Y Distance", "yDistance", value = 1)
self.newInput("Vector", "Offset", "offset", isDataModified = True)
self.newOutput("Vector List", "Vertices", "vertices")
self.newOutput("Edge Indices List", "Edge Indices", "edgeIndices")
self.newOutput("Polygon Indices List", "Polygon Indices", "polygonIndices")
def draw(self, layout):
layout.prop(self, "centerGrid")
def execute(self, xDivisions, yDivisions, xDistance, yDistance, offset):
xDivisions = max(xDivisions, 2)
yDivisions = max(yDivisions, 2)
offset = offset.copy()
offset.x -= (xDivisions - 1) * xDistance / 2 if self.centerGrid else 0
offset.y -= (yDivisions - 1) * yDistance / 2 if self.centerGrid else 0
vertices = gridVertices(xDivisions, yDivisions, xDistance, yDistance, offset) if self.outputs[0].isLinked else []
edgeIndices = GridMeshIndices.innerQuadEdges(xDivisions, yDivisions) if self.outputs[1].isLinked else []
polygonIndices = GridMeshIndices.innerQuadPolygons(xDivisions, yDivisions) if self.outputs[2].isLinked else []
return vertices, edgeIndices, polygonIndices
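# Worked example of the centering above (illustrative): with xDivisions = 5 and
# xDistance = 1 the grid spans (5 - 1) * 1 = 4 units along X, so the offset is shifted
# by -2 to keep the grid centred on the original offset; Y is handled the same way.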
| gpl-3.0 | -2,165,053,050,149,164,300 | 47.585366 | 121 | 0.689759 | false | 3.688889 | false | false | false |
addition-it-solutions/project-all | addons/website_crm_score/models/sales_team.py | 6 | 9841 | # -*- coding: utf-8 -*-
from openerp import fields, api, models
from openerp.osv import osv
from openerp.tools.safe_eval import safe_eval
from random import randint, shuffle
import datetime
import logging
import math
_logger = logging.getLogger(__name__)
evaluation_context = {
'datetime': datetime,
'context_today': datetime.datetime.now,
}
try:
from flanker.addresslib import address
def checkmail(mail):
return bool(address.validate_address(mail))
except ImportError:
_logger.warning('flanker not found, email validation disabled.')
def checkmail(mail):
return True
class team_user(models.Model):
_name = 'team.user'
@api.one
def _count_leads(self):
if self.id:
limit_date = datetime.datetime.now() - datetime.timedelta(days=30)
domain = [('user_id', '=', self.user_id.id),
('team_id', '=', self.team_id.id),
('assign_date', '>', fields.Datetime.to_string(limit_date))
]
self.leads_count = self.env['crm.lead'].search_count(domain)
else:
self.leads_count = 0
@api.one
def _get_percentage(self):
try:
self.percentage_leads = round(100 * self.leads_count / float(self.maximum_user_leads), 2)
except ZeroDivisionError:
self.percentage_leads = 0.0
@api.one
@api.constrains('team_user_domain')
def _assert_valid_domain(self):
try:
domain = safe_eval(self.team_user_domain or '[]', evaluation_context)
self.env['crm.lead'].search(domain)
except Exception:
raise Warning('The domain is incorrectly formatted')
team_id = fields.Many2one('crm.team', string='SaleTeam', required=True, oldname='section_id')
    user_id = fields.Many2one('res.users', string='Salesman', required=True)
name = fields.Char(related='user_id.partner_id.display_name')
running = fields.Boolean(string='Running', default=True)
team_user_domain = fields.Char('Domain')
maximum_user_leads = fields.Integer('Leads Per Month')
leads_count = fields.Integer('Assigned Leads', compute='_count_leads', help='Assigned Leads this last month')
percentage_leads = fields.Float(compute='_get_percentage', string='Percentage leads')
@api.one
def toggle_active(self):
if isinstance(self.id, int): # if already saved
self.running = not self.running
class crm_team(osv.osv):
_inherit = "crm.team"
@api.one
def _count_leads(self):
if self.id:
self.leads_count = self.env['crm.lead'].search_count([('team_id', '=', self.id)])
else:
self.leads_count = 0
@api.one
def _assigned_leads(self):
limit_date = datetime.datetime.now() - datetime.timedelta(days=30)
domain = [('assign_date', '>=', fields.Datetime.to_string(limit_date)),
('team_id', '=', self.id),
('user_id', '!=', False)
]
self.assigned_leads = self.env['crm.lead'].search_count(domain)
@api.one
def _unassigned_leads(self):
self.unassigned_leads = self.env['crm.lead'].search_count(
[('team_id', '=', self.id), ('user_id', '=', False), ('assign_date', '=', False)]
)
@api.one
def _capacity(self):
self.capacity = sum(s.maximum_user_leads for s in self.team_user_ids)
@api.one
@api.constrains('score_team_domain')
def _assert_valid_domain(self):
try:
domain = safe_eval(self.score_team_domain or '[]', evaluation_context)
self.env['crm.lead'].search(domain)
except Exception:
raise Warning('The domain is incorrectly formatted')
ratio = fields.Float(string='Ratio')
score_team_domain = fields.Char('Domain')
leads_count = fields.Integer(compute='_count_leads')
assigned_leads = fields.Integer(compute='_assigned_leads')
unassigned_leads = fields.Integer(compute='_unassigned_leads')
capacity = fields.Integer(compute='_capacity')
team_user_ids = fields.One2many('team.user', 'team_id', string='Salesman')
min_for_assign = fields.Integer("Minimum score", help="Minimum score to be automatically assign (>=)", default=0, required=True)
@api.model
def direct_assign_leads(self, ids=[]):
ctx = dict(self._context, mail_notify_noemail=True)
self.with_context(ctx)._assign_leads()
@api.model
def dry_assign_leads(self, ids=[]):
self._assign_leads(dry=True)
@api.model
    # Note: the dry mode assigns only 50 leads per sales team, for speed reasons
def assign_leads_to_salesteams(self, all_salesteams, dry=False):
shuffle(all_salesteams)
haslead = True
while haslead:
haslead = False
for salesteam in all_salesteams:
domain = safe_eval(salesteam['score_team_domain'], evaluation_context)
domain.extend([('team_id', '=', False), ('user_id', '=', False)])
domain.extend(['|', ('stage_id.on_change', '=', False), '&', ('stage_id.probability', '!=', 0), ('stage_id.probability', '!=', 100)])
leads = self.env["crm.lead"].search(domain, limit=50)
haslead = haslead or (len(leads) == 50 and not dry)
if not leads.exists():
continue
if dry:
for lead in leads:
values = {'lead_id': lead.id, 'team_id': salesteam['id']}
self.env['crm.leads.dry.run'].create(values)
else:
leads.write({'team_id': salesteam['id']})
# Erase fake/false email
spams = map(lambda x: x.id, filter(lambda x: x.email_from and not checkmail(x.email_from), leads))
if spams:
self.env["crm.lead"].browse(spams).write({'email_from': False})
# Merge duplicated lead
leads_done = set()
for lead in leads:
if lead.id not in leads_done:
leads_duplicated = lead.get_duplicated_leads(False)
if len(leads_duplicated) > 1:
self.env["crm.lead"].browse(leads_duplicated).merge_opportunity(False, False)
leads_done.update(leads_duplicated)
self._cr.commit()
self._cr.commit()
@api.model
def assign_leads_to_salesmen(self, all_team_users, dry=False):
users = []
for su in all_team_users:
if (su.maximum_user_leads - su.leads_count) <= 0:
continue
domain = safe_eval(su.team_user_domain or '[]', evaluation_context)
domain.extend([
('user_id', '=', False),
('assign_date', '=', False),
('score', '>=', su.team_id.min_for_assign)
])
            # assignment rhythm: roughly 2 days' worth of leads if many leads need to be assigned
limit = int(math.ceil(su.maximum_user_leads / 15.0))
if dry:
dry_leads = self.env["crm.leads.dry.run"].search([('team_id', '=', su.team_id.id)])
domain.append(['id', 'in', dry_leads.mapped('lead_id.id')])
else:
domain.append(('team_id', '=', su.team_id.id))
leads = self.env["crm.lead"].search(domain, order='score desc', limit=limit * len(su.team_id.team_user_ids))
users.append({
"su": su,
"nbr": min(su.maximum_user_leads - su.leads_count, limit),
"leads": leads
})
assigned = set()
while users:
i = 0
# statistically select the user that should receive the next lead
idx = randint(0, reduce(lambda nbr, x: nbr + x['nbr'], users, 0) - 1)
while idx > users[i]['nbr']:
idx -= users[i]['nbr']
i += 1
user = users[i]
# Get the first unassigned leads available for this user
while user['leads'] and user['leads'][0] in assigned:
user['leads'] = user['leads'][1:]
if not user['leads']:
del users[i]
continue
# lead convert for this user
lead = user['leads'][0]
assigned.add(lead)
if dry:
values = {'lead_id': lead.id, 'team_id': user['su'].team_id.id, 'user_id': user['su'].user_id.id}
self.env['crm.leads.dry.run'].create(values)
else:
                # Assign date will be set by the write function
data = {'user_id': user['su'].user_id.id}
lead.write(data)
lead.convert_opportunity(lead.partner_id and lead.partner_id.id or None)
self._cr.commit()
user['nbr'] -= 1
if not user['nbr']:
del users[i]
@api.model
def _assign_leads(self, dry=False):
# Emptying the table
self._cr.execute("""
TRUNCATE TABLE crm_leads_dry_run;
""")
all_salesteams = self.search_read(fields=['score_team_domain'], domain=[('score_team_domain', '!=', False)])
all_team_users = self.env['team.user'].search([('running', '=', True)])
self.env['website.crm.score'].assign_scores_to_leads()
self.assign_leads_to_salesteams(all_salesteams, dry=dry)
# Compute score after assign to salesteam, because if a merge has been done, the score for leads is removed.
self.env['website.crm.score'].assign_scores_to_leads()
self.assign_leads_to_salesmen(all_team_users, dry=dry)
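    # Hedged usage sketch (not part of the original module): these assignment
    # entry points are typically wired to a scheduled action (cron). A
    # hypothetical server-side call could look like:
    #
    #   self.env['crm.team'].direct_assign_leads()   # assign for real
    #   self.env['crm.team'].dry_assign_leads()      # only fill crm_leads_dry_run
    #
    # The method names come from this class; the surrounding invocation is
    # illustrative only.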
| agpl-3.0 | 7,818,359,904,374,259,000 | 37.291829 | 149 | 0.553297 | false | 3.795218 | false | false | false |
lakshmi-kannan/st2 | st2common/st2common/util/reference.py | 13 | 2953 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.exceptions import db
from st2common.models.system.common import ResourceReference
def get_ref_from_model(model):
if model is None:
raise ValueError('Model has None value.')
model_id = getattr(model, 'id', None)
if model_id is None:
raise db.StackStormDBObjectMalformedError('model %s must contain id.' % str(model))
reference = {'id': str(model_id),
'name': getattr(model, 'name', None)}
return reference
def get_model_from_ref(db_api, reference):
if reference is None:
raise db.StackStormDBObjectNotFoundError('No reference supplied.')
model_id = reference.get('id', None)
if model_id is not None:
return db_api.get_by_id(model_id)
model_name = reference.get('name', None)
if model_name is None:
raise db.StackStormDBObjectNotFoundError('Both name and id are None.')
return db_api.get_by_name(model_name)
def get_model_by_resource_ref(db_api, ref):
"""
Retrieve a DB model based on the resource reference.
:param db_api: Class of the object to retrieve.
:type db_api: ``object``
:param ref: Resource reference.
:type ref: ``str``
:return: Retrieved object.
"""
ref_obj = ResourceReference.from_string_reference(ref=ref)
result = db_api.query(name=ref_obj.name, pack=ref_obj.pack).first()
return result
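# Hedged usage sketch (not part of the original module): resource references
# are strings of the form "<pack>.<name>", so a hypothetical lookup could be:
#
#   action_db = get_model_by_resource_ref(db_api=Action, ref='examples.local')
#
# 'Action' (a persistence/db access class) and 'examples.local' are
# illustrative placeholders, not names defined in this file.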
def get_resource_ref_from_model(model):
"""
Return a ResourceReference given db_model.
:param model: DB model that contains name and pack.
:type model: ``object``
:return: ResourceReference.
"""
try:
name = model.name
pack = model.pack
except AttributeError:
        raise Exception('Cannot build ResourceReference for model: %s. Name or pack missing.'
                        % model)
return ResourceReference(name=name, pack=pack)
def get_str_resource_ref_from_model(model):
"""
Return a resource reference as string given db_model.
:param model: DB model that contains name and pack.
:type model: ``object``
:return: String representation of ResourceReference.
"""
return get_resource_ref_from_model(model).ref
| apache-2.0 | -5,545,722,002,064,985,000 | 32.942529 | 94 | 0.691162 | false | 3.900925 | false | false | false |
mdl29/tidutyzef | serveur/playerws.py | 1 | 2544 | """Contain the socket handler for players"""
from utils import check_types
from player import Player
from game import Game
from websocket import WebSocketHandler
import errcode
class PlayerWs(WebSocketHandler):
"""The socket handler for websocket"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.player = None
self.callable_from_json = {"login": self.login,
"logout": self.logout,
"updatePos": self.update_pos,
"choice": self.choice,
"getAllUsers": self.get_all_users}
def get_all_users(self):
"""send to player all players connected"""
players = Game().get_players()
msg = {'object': "usersConnected",
'tidu': [p.name for p in players if p.team == 'tidu'],
'tizef': [p.name for p in players if p.team == 'tizef']}
self.send(msg)
def reset(self):
"""Reset the socket (recreate a new Player...)"""
self.player = None
self.logged = False
@check_types
def login(self, username, team):
"""Login player and look if username and team are valids"""
if self.player:
self.send(errcode.USERNAME_ALREADY_SET)
else:
self.player = Player(username, team, self)
if not Game().add_player(self.player):
self.send(errcode.USERNAME_ALREADY_IN_USE)
self.reset()
self.logged = True
def logout(self):
"""logout player and remove it from game"""
self.close()
@check_types
def update_pos(self, lat: float, lng: float):
"""update the player position"""
self.player.position = (lat, lng)
def choice(self, choice: str):
"""set choice for battle"""
Game().set_player_choice(self.player, choice)
def on_close(self):
print("player {} of team {} is exiting...".format(self.player.name, self.player.team))
if self.player:
self.logout()
Game().remove_player(self.player)
self.reset()
def send(self, msg):
super().send(msg)
if self.player:
print('Send to {} of team {} : {}'.format(self.player.name, self.player.team, msg))
def on_message(self, msg):
if self.player:
print('Send by {} of team {} : {}'.format(self.player.name, self.player.team, msg))
super().on_message(msg)
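    # Hedged sketch (not part of the original handler): the callable_from_json
    # table above implies client messages roughly shaped like the following;
    # the exact JSON envelope expected by WebSocketHandler.on_message is an
    # assumption here, not something defined in this file.
    #
    #   {"object": "login", "username": "alice", "team": "tidu"}
    #   {"object": "updatePos", "lat": 48.43, "lng": -4.41}
    #   {"object": "choice", "choice": "attack"}
    #   {"object": "getAllUsers"}
    #   {"object": "logout"}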
| lgpl-3.0 | -6,687,239,132,226,760,000 | 32.473684 | 95 | 0.551494 | false | 4.076923 | false | false | false |
hirvola/bsa | 1MB/pat-gen.py | 1 | 2495 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
import sys, argparse, random
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Pattern generator")
parser.add_argument("infile", nargs='?', type=argparse.FileType('rb'), default=sys.stdin.buffer)
parser.add_argument("-p", type=int, default=10, help="number of patterns to generate")
parser.add_argument("-P", action='store_true', help="transform patterns in infile")
parser.add_argument("-m", type=int, default=None, help="minimum length of a pattern")
parser.add_argument("-M", type=int, default=None, help="maximum length of a pattern")
parser.add_argument("-n", type=int, default=None, help="minimum noise")
parser.add_argument("-N", type=int, default=None, help="maximum noise")
parser.add_argument("-r", action='store_true', help="rotate to create circular patterns")
parser.add_argument("-s", type=int, default=None, help="random seed")
args = parser.parse_args()
random.seed(args.s)
# Sanity checks
m = args.m if args.m is not None else args.M or 20
M = args.M if args.M is not None else m
n = args.n if args.n is not None else 0
N = args.N if args.N is not None else n
if m < 1 or m > M: raise Exception("Invalid min and max pattern lengths")
if n < 0 or n > N: raise Exception("Invalid min and max noise")
# Get a random text substring
def randsubstring(T):
while True:
i = random.randrange(0, len(T)-M)
j = i + random.randint(m, M)
P = T[i:j]
if all(x not in b'\n\0' for x in P):
return P
raise Exception("WTF")
T = args.infile.read()
if args.P is True:
# Transform existing patterns
gen = (ln for ln in T.split(b'\n') if ln)
else:
# Generate new patterns
if len(T) < M: raise Exception("Too short infile")
gen = (randsubstring(T) for i in range(args.p))
# Character pool for noise
pool = list(set(T) - set(b'\n\r\t\0')) if N > 0 else []
def transform(P):
# Simulate noise
for noise in range(random.randint(n, N)):
k = random.randrange(0, len(P))
P = P[:k] + bytes([random.choice(pool)]) + P[k+1:]
# Rotate
if args.r is True:
k = random.randrange(0, len(P))
P = P[k:] + P[:k]
return P
for P in gen:
sys.stdout.buffer.write(transform(P) + b'\n')
sys.stdout.buffer.flush()
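# Hedged usage sketch (not part of the original script); file names are
# illustrative:
#
#   ./pat-gen.py corpus.txt -p 100 -m 8 -M 16 -N 2 -r > patterns.txt
#
# This would emit 100 patterns of 8-16 bytes sampled from corpus.txt, each
# with up to 2 noisy characters and a random rotation, one pattern per line.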
| mit | 8,204,699,868,084,737,000 | 37.984375 | 100 | 0.595591 | false | 3.465278 | false | false | false |
MehdiSfr/tensor-flow | tensorflow/python/training/saver.py | 1 | 35528 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import time
import numpy as np
import six
from google.protobuf import text_format
from tensorflow.python.client import graph_util
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import logging
from tensorflow.python.training import saver_pb2
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
class BaseSaverBuilder(object):
"""Base class for Savers.
Can be extended to create different Ops.
"""
class VarToSave(object):
"""Class used to describe variable slices that need to be saved."""
def __init__(self, var, slice_spec, name):
self.var = var
self.slice_spec = slice_spec
self.name = name
def __init__(self):
pass
def save_op(self, filename_tensor, vars_to_save):
"""Create an Op to save 'vars_to_save'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
vars_to_save: a list of BaseSaverBuilder.VarToSave objects.
Returns:
An Operation that save the variables.
"""
return io_ops._save(
filename=filename_tensor,
tensor_names=[vs.name for vs in vars_to_save],
tensors=[vs.var for vs in vars_to_save],
tensor_slices=[vs.slice_spec for vs in vars_to_save])
def restore_op(self, filename_tensor, var_to_save, preferred_shard):
"""Create an Op to read the variable 'var_to_save'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
var_to_save: a BaseSaverBuilder.VarToSave object.
preferred_shard: Int. Shard to open first when loading a sharded file.
Returns:
A Tensor resulting from reading 'var_to_save' from 'filename'.
"""
return io_ops._restore_slice(
filename_tensor,
var_to_save.name,
var_to_save.slice_spec,
var_to_save.var.dtype,
preferred_shard=preferred_shard)
def sharded_filename(self, filename_tensor, shard, num_shards):
"""Append sharding information to a filename.
Args:
filename_tensor: a string tensor.
shard: integer. The shard for the filename.
num_shards: an int Tensor for the number of shards.
Returns:
A string tensor.
"""
return gen_io_ops._sharded_filename(filename_tensor, shard, num_shards)
def _AddSaveOps(self, filename_tensor, vars_to_save):
"""Add ops to save variables that are on the same shard.
Args:
filename_tensor: String Tensor.
vars_to_save: a list of _VarToSave objects.
Returns:
A tensor with the filename used to save.
"""
save = self.save_op(filename_tensor, vars_to_save)
return control_flow_ops.with_dependencies([save], filename_tensor)
def _AddShardedSaveOps(self, filename_tensor, per_device):
"""Add ops to save the params per shard.
Args:
filename_tensor: String Tensor.
per_device: A list of (device, BaseSaverBuilder.VarToSave) pairs, as
returned by _GroupByDevices().
Returns:
An op to save the variables.
"""
num_shards = len(per_device)
sharded_saves = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
for shard, (device, vars_to_save) in enumerate(per_device):
with ops.device(device):
sharded_filename = self.sharded_filename(
filename_tensor, shard, num_shards_tensor)
sharded_saves.append(self._AddSaveOps(sharded_filename, vars_to_save))
# Return the sharded name for the save path.
with ops.control_dependencies([x.op for x in sharded_saves]):
return gen_io_ops._sharded_filespec(filename_tensor, num_shards_tensor)
def _AddRestoreOps(self,
filename_tensor,
vars_to_save,
restore_sequentially,
reshape,
preferred_shard=-1,
name="restore_all"):
"""Add operations to restore vars_to_save.
Args:
filename_tensor: Tensor for the path of the file to load.
vars_to_save: a list of _VarToSave objects.
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of
the corresponding variable.
preferred_shard: Shard to open first when loading a sharded file.
name: Name for the returned op.
Returns:
An Operation that restores the variables.
"""
assign_ops = []
for vs in vars_to_save:
v = vs.var
restore_control_inputs = assign_ops[-1:] if restore_sequentially else []
# Load and optionally reshape on the CPU, as string tensors are not
# available on the GPU.
# TODO(touts): Re-enable restore on GPU when we can support annotating
# string tensors as "HostMemory" inputs.
with ops.device(graph_util.set_cpu0(v.device) if v.device else None):
with ops.control_dependencies(restore_control_inputs):
values = self.restore_op(filename_tensor, vs, preferred_shard)
if reshape:
shape = v.get_shape()
if not shape.is_fully_defined():
shape = array_ops.shape(v)
values = array_ops.reshape(values, shape)
# Assign on the same device as the variable.
with ops.device(v.device):
assign_ops.append(state_ops.assign(v,
values,
validate_shape=not reshape))
# Create a Noop that has control dependencies from all the updates.
return control_flow_ops.group(*assign_ops, name=name)
def _AddShardedRestoreOps(self, filename_tensor, per_device,
restore_sequentially, reshape):
"""Add Ops to save variables from multiple devices.
Args:
filename_tensor: Tensor for the path of the file to load.
per_device: A list of (device, _VarToSave) pairs, as
returned by _GroupByDevices().
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of
the corresponding variable.
Returns:
An Operation that restores the variables.
"""
sharded_restores = []
for shard, (device, vars_to_save) in enumerate(per_device):
with ops.device(device):
sharded_restores.append(self._AddRestoreOps(
filename_tensor,
vars_to_save,
restore_sequentially,
reshape,
preferred_shard=shard,
name="restore_shard"))
return control_flow_ops.group(*sharded_restores, name="restore_all")
def _IsVariable(self, v):
return isinstance(v, ops.Tensor) and (
v.op.type == "Variable" or v.op.type == "AutoReloadVariable")
def _GroupByDevices(self, vars_to_save):
"""Group Variable tensor slices per device.
TODO(touts): Make sure that all the devices found are on different
job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.
    It can happen if the devices are unspecified.
Args:
vars_to_save: a list of BaseSaverBuilder.VarToSave objects.
Returns:
A list of tuples: (device_name, BaseSaverBuilder.VarToSave) tuples.
The list is sorted by ascending device_name.
"""
per_device = collections.defaultdict(lambda: [])
for var_to_save in vars_to_save:
per_device[var_to_save.var.device].append(var_to_save)
return sorted(per_device.items(), key=lambda t: t[0])
def _VarListToDict(self, var_list):
"""Create a dictionary of names to variable lists.
Args:
var_list: A list, tuple, or set of Variables.
Returns:
A dictionary of variable names to the variables that must be saved under
that name. Variables with save_slice_info are grouped together under the
same key in no particular order.
Raises:
TypeError: If the type of var_list or its elements is not supported.
ValueError: If at least two variables share the same name.
"""
if not isinstance(var_list, (list, tuple, set)):
raise TypeError("Variables to save should be passed in a dict or a "
"list: %s" % var_list)
var_list = set(var_list)
names_to_variables = {}
for var in var_list:
# pylint: disable=protected-access
if isinstance(var, variables.Variable) and var._save_slice_info:
name = var._save_slice_info.name
if name in names_to_variables:
if not isinstance(names_to_variables[name], list):
raise ValueError("Mixing slices and non-slices with the same name: "
"%s" % name)
names_to_variables[name].append(var)
else:
names_to_variables[name] = [var]
else:
var = ops.convert_to_tensor(var)
if not self._IsVariable(var):
raise TypeError("Variable to save is not a Variable: %s" % var)
name = var.op.name
if name in names_to_variables:
raise ValueError("At least two variables have the same name: %s" %
name)
names_to_variables[name] = var
# pylint: enable=protected-access
return names_to_variables
def _ValidateAndSliceInputs(self, names_to_variables):
"""Returns the variables and names that will be used for a Saver.
Args:
names_to_variables: A dict (k, v) where k is the name of a variable and v
is a Variable to save or a BaseSaverBuilder.Saver.
Returns:
A list of BaseSaverBuilder.VarToSave objects.
Raises:
TypeError: if any of the keys are not strings or any of the
values are not one of Tensor or Variable.
ValueError: if the same variable is given in more than one value
(this also applies to slices of SlicedVariables).
"""
if not isinstance(names_to_variables, dict):
names_to_variables = self._VarListToDict(names_to_variables)
vars_to_save = []
seen_variables = set()
for name in sorted(names_to_variables.keys()):
if not isinstance(name, six.string_types):
raise TypeError("names_to_variables must be a dict mapping string "
"names to variable Tensors. Name is not a string: %s" %
name)
v = names_to_variables[name]
if isinstance(v, (list, tuple)):
# A set of slices.
slice_name = None
# pylint: disable=protected-access
for variable in v:
if not isinstance(variable, variables.Variable):
raise ValueError("Slices must all be Variables: %s" % variable)
if not variable._save_slice_info:
raise ValueError("Slices must all be slices: %s" % variable)
if slice_name is None:
slice_name = variable._save_slice_info.name
elif slice_name != variable._save_slice_info.name:
            raise ValueError("Slices must all be from the same tensor: %s != %s"
                             % (slice_name, variable._save_slice_info.name))
self._AddVarToSave(vars_to_save, seen_variables,
variable, variable._save_slice_info.spec, name)
# pylint: enable=protected-access
else:
# A variable or tensor.
variable = ops.convert_to_tensor(v)
if not self._IsVariable(variable):
raise TypeError("names_to_variables must be a dict mapping string "
"names to Tensors/Variables. Not a variable: %s" %
variable)
self._AddVarToSave(vars_to_save, seen_variables, variable, "", name)
return vars_to_save
def _AddVarToSave(self, vars_to_save, seen_variables, variable, slice_spec,
name):
"""Create a VarToSave and add it to the vars_to_save list.
Args:
vars_to_save: List to append the new VarToSave to.
seen_variables: Set of variables already processed. Used to check
that each variable is only saved once.
variable: Variable to save.
slice_spec: String. Slice spec for the variable.
name: Name to use to save the variable.
Raises:
ValueError: If the variable has already been processed.
"""
if variable in seen_variables:
raise ValueError("The same variable will be restored with two names: %s",
variable)
vars_to_save.append(BaseSaverBuilder.VarToSave(variable, slice_spec, name))
seen_variables.add(variable)
def build(self,
names_to_variables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False):
"""Adds save/restore nodes to the graph and creates a SaverDef proto.
Args:
names_to_variables: A dictionary mapping name to a Variable.
Each name will be associated with the
corresponding variable in the checkpoint.
      reshape: If True, allow restoring parameters from a checkpoint
        where the parameters have a different shape. This is
        only needed when you try to restore from a Dist-Belief checkpoint,
        and only sometimes.
sharded: If True, shard the checkpoints, one per device that has
Parameters nodes.
max_to_keep: maximum number of checkpoints to keep. As new checkpoints
are created, old ones are deleted. If None or 0, no checkpoints are
deleted. Presently the number is only roughly enforced. For example
in case of restarts more than max_to_keep checkpoints may be kept.
keep_checkpoint_every_n_hours: How often checkpoints should be kept.
Defaults to 10,000 hours.
name: string. Optional name to use as a prefix when adding operations.
restore_sequentially: A Bool, which if true, causes restore of different
variables to happen sequentially within each device.
Returns:
A SaverDef proto.
Raises:
TypeError: If 'names_to_variables' is not a dictionary mapping string
keys to variable Tensors.
ValueError: If any of the keys or values in 'names_to_variables' is not
unique.
"""
vars_to_save = self._ValidateAndSliceInputs(names_to_variables)
if max_to_keep is None:
max_to_keep = 0
with ops.op_scope([vs.var for vs in vars_to_save], name, "save") as name:
# Add the Constant string tensor for the filename.
filename_tensor = constant_op.constant("model")
# Add the save ops.
if sharded:
per_device = self._GroupByDevices(vars_to_save)
save_tensor = self._AddShardedSaveOps(filename_tensor, per_device)
restore_op = self._AddShardedRestoreOps(
filename_tensor, per_device, restore_sequentially, reshape)
else:
save_tensor = self._AddSaveOps(filename_tensor, vars_to_save)
restore_op = self._AddRestoreOps(
filename_tensor, vars_to_save, restore_sequentially, reshape)
assert restore_op.name.endswith("restore_all"), restore_op.name
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.name,
save_tensor_name=save_tensor.name,
restore_op_name=restore_op.name,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
sharded=sharded)
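# Hedged sketch (not part of the original file): the builder is normally
# driven by the Saver constructor below, but a direct call could look like
# this; v1 and v2 are assumed to be existing Variables.
#
#   builder = BaseSaverBuilder()
#   saver_def = builder.build({"v1": v1, "v2": v2}, sharded=True, max_to_keep=3)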
def _GetCheckpointFilename(save_dir, latest_filename):
"""Returns a filename for storing the CheckpointState.
Args:
save_dir: The directory for saving and restoring checkpoints.
latest_filename: Name of the file in 'save_dir' that is used
to store the CheckpointState.
Returns:
The path of the file that contains the CheckpointState proto.
"""
if latest_filename is None:
latest_filename = "checkpoint"
return os.path.join(save_dir, latest_filename)
def update_checkpoint_state(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
latest_filename=None):
"""Updates the content of the 'checkpoint' file.
This updates the checkpoint file containing a CheckpointState
proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: list of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
Raises:
RuntimeError: If the save paths conflict.
"""
if all_model_checkpoint_paths is None:
all_model_checkpoint_paths = []
elif all_model_checkpoint_paths[-1] != model_checkpoint_path:
logging.warning(
"%s is not in all_model_checkpoint_paths! Manually adding it.",
model_checkpoint_path)
all_model_checkpoint_paths.append(model_checkpoint_path)
# Writes the "checkpoint" file for the coordinator for later restoration.
coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)
if coord_checkpoint_filename == model_checkpoint_path:
raise RuntimeError("Save path '%s' conflicts with path used for "
"checkpoint state. Please use a different save path." %
model_checkpoint_path)
coord_checkpoint_proto = CheckpointState(
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths)
f = gfile.FastGFile(coord_checkpoint_filename, mode="w")
f.write(text_format.MessageToString(coord_checkpoint_proto))
f.close()
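# Hedged usage sketch (not part of the original file); the paths are
# illustrative only:
#
#   update_checkpoint_state("/tmp/train", "/tmp/train/model-1000",
#                           all_model_checkpoint_paths=["/tmp/train/model-500",
#                                                       "/tmp/train/model-1000"])
#
# This writes the 'checkpoint' file that get_checkpoint_state() below reads
# back as a CheckpointState proto.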
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
"""Returns CheckpointState proto from the "checkpoint" file.
If the "checkpoint" file contains a valid CheckpointState
proto, returns it.
Args:
checkpoint_dir: The directory of checkpoints.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
Returns:
A CheckpointState if the state was available, None
otherwise.
"""
ckpt = None
coord_checkpoint_filename = _GetCheckpointFilename(
checkpoint_dir, latest_filename)
f = None
try:
# Check that the file exists before opening it to avoid
# many lines of errors from colossus in the logs.
if gfile.Exists(coord_checkpoint_filename):
f = gfile.FastGFile(coord_checkpoint_filename, mode="r")
ckpt = CheckpointState()
text_format.Merge(f.read(), ckpt)
except IOError:
# It's ok if the file cannot be read
return None
except text_format.ParseError as e:
logging.warning(str(e))
logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
return None
finally:
if f:
f.close()
return ckpt
class Saver(object):
"""Saves and restores variables.
See [Variables](../../how_tos/variables/index.md)
for an overview of variables, saving and restoring.
The `Saver` class adds ops to save and restore variables to and from
*checkpoints*. It also provides convenience methods to run these ops.
Checkpoints are binary files in a proprietary format which map variable names
to tensor values. The best way to examine the contents of a checkpoint is to
load it using a `Saver`.
Savers can automatically number checkpoint filenames with a provided counter.
This lets you keep multiple checkpoints at different steps while training a
model. For example you can number the checkpoint filenames with the training
step number. To avoid filling up disks, savers manage checkpoint files
automatically. For example, they can keep only the N most recent files, or
one checkpoint for every N hours of training.
You number checkpoint filenames by passing a value to the optional
`global_step` argument to `save()`:
```python
saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0'
...
saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000'
```
Additionally, optional arguments to the `Saver()` constructor let you control
the proliferation of checkpoint files on disk:
* `max_to_keep` indicates the maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
* `keep_checkpoint_every_n_hours`: In addition to keeping the most recent
`max_to_keep` checkpoint files, you might want to keep one checkpoint file
for every N hours of training. This can be useful if you want to later
analyze how a model progressed during a long training session. For
example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep
one checkpoint file for every 2 hours of training. The default value of
10,000 hours effectively disables the feature.
Note that you still have to call the `save()` method to save the model.
Passing these arguments to the constructor will not save variables
automatically for you.
A training program that saves regularly looks like:
```python
...
# Create a saver.
saver = tf.train.Saver(...variables...)
# Launch the graph and train, saving the model every 1,000 steps.
sess = tf.Session()
for step in xrange(1000000):
sess.run(..training_op..)
if step % 1000 == 0:
# Append the step number to the checkpoint name:
saver.save(sess, 'my-model', global_step=step)
```
In addition to checkpoint files, savers keep a protocol buffer on disk with
the list of recent checkpoints. This is used to manage numbered checkpoint
files and by `latest_checkpoint()`, which makes it easy to discover the path
to the most recent checkpoint. That protocol buffer is stored in a file named
'checkpoint' next to the checkpoint files.
If you create several savers, you can specify a different filename for the
protocol buffer file in the call to `save()`.
@@__init__
@@save
@@restore
Other utility methods.
@@last_checkpoints
@@set_last_checkpoints
@@as_saver_def
"""
def __init__(self,
var_list=None,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
saver_def=None,
builder=None):
"""Creates a `Saver`.
The constructor adds ops to save and restore variables.
`var_list` specifies the variables that will be saved and restored. It can
be passed as a `dict` or a list:
* A `dict` of names to variables: The keys are the names that will be
used to save or restore the variables in the checkpoint files.
* A list of variables: The variables will be keyed with their op name in
the checkpoint files.
For example:
```python
v1 = tf.Variable(..., name='v1')
v2 = tf.Variable(..., name='v2')
# Pass the variables as a dict:
saver = tf.train.Saver({'v1': v1, 'v2': v2})
# Or pass them as a list.
saver = tf.train.Saver([v1, v2])
# Passing a list is equivalent to passing a dict with the variable op names
# as keys:
saver = tf.train.Saver({v.op.name: v for v in [v1, v2]})
```
The optional `reshape` argument, if `True`, allows restoring a variable from
a save file where the variable had a different shape, but the same number
of elements and type. This is useful if you have reshaped a variable and
want to reload it from an older checkpoint.
The optional `sharded` argument, if `True`, instructs the saver to shard
checkpoints per device.
Args:
var_list: A list of `Variable` objects or a dictionary mapping names to
variables. If `None`, defaults to the list of all variables.
reshape: If `True`, allows restoring parameters from a checkpoint
where the variables have a different shape.
sharded: If `True`, shard the checkpoints, one per device.
max_to_keep: maximum number of recent checkpoints to keep.
        Defaults to 5.
keep_checkpoint_every_n_hours: How often to keep checkpoints.
Defaults to 10,000 hours.
name: string. Optional name to use as a prefix when adding operations.
restore_sequentially: A `Bool`, which if true, causes restore of different
variables to happen sequentially within each device. This can lower
memory usage when restoring very large models.
saver_def: Optional `SaverDef` proto to use instead of running the
builder. This is only useful for specialty code that wants to recreate
a `Saver` object for a previously built `Graph` that had a `Saver`.
The `saver_def` proto should be the one returned by the
`as_saver_def()` call of the `Saver` that was created for that `Graph`.
builder: Optional `SaverBuilder` to use if a `saver_def` was not provided.
Defaults to `BaseSaverBuilder()`.
Raises:
TypeError: If `var_list` is invalid.
ValueError: If any of the keys or values in `var_list` are not unique.
"""
if saver_def is None:
if builder is None:
builder = BaseSaverBuilder()
if var_list is None:
var_list = variables.all_variables()
if not var_list:
raise ValueError("No variables to save")
saver_def = builder.build(
var_list,
reshape=reshape,
sharded=sharded,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
name=name,
restore_sequentially=restore_sequentially)
if not isinstance(saver_def, saver_pb2.SaverDef):
raise ValueError("saver_def must if a saver_pb2.SaverDef: %s" % saver_def)
if not saver_def.save_tensor_name:
raise ValueError("saver_def must specify the save_tensor_name: %s"
% str(saver_def))
if not saver_def.restore_op_name:
raise ValueError("saver_def must specify the restore_op_name: %s"
% str(saver_def))
self._filename_tensor_name = saver_def.filename_tensor_name
self._save_tensor_name = saver_def.save_tensor_name
self._restore_op_name = saver_def.restore_op_name
self._max_to_keep = saver_def.max_to_keep
# If keep_checkpoint_every_n_hours is not set, set it to 10000 hours.
self._keep_checkpoint_every_n_hours = (
saver_def.keep_checkpoint_every_n_hours if
saver_def.keep_checkpoint_every_n_hours else 10000)
self._next_checkpoint_time = (
time.time() + self._keep_checkpoint_every_n_hours * 3600)
self._sharded = saver_def.sharded
self._last_checkpoints = []
def _CheckpointFilename(self, p):
"""Returns the checkpoint filename given a `(filename, time)` pair.
Args:
p: (filename, time) pair.
Returns:
Checkpoint file name.
"""
name, _ = p
return name
def _MaybeDeleteOldCheckpoints(self, latest_save_path):
"""Deletes old checkpoints if necessary.
Always keep the last `max_to_keep` checkpoints. If
`keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint
every `N` hours. For example, if `N` is 0.5, an additional checkpoint is
kept for every 0.5 hours of training; if `N` is 10, an additional
checkpoint is kept for every 10 hours of training.
Args:
latest_save_path: Name including path of checkpoint file to save.
"""
if not self._max_to_keep:
return
# Remove first from list if the same name was used before.
for p in self._last_checkpoints:
if latest_save_path == self._CheckpointFilename(p):
self._last_checkpoints.remove(p)
# Append new path to list
self._last_checkpoints.append((latest_save_path, time.time()))
# If more than max_to_keep, remove oldest.
if len(self._last_checkpoints) > self._max_to_keep:
p = self._last_checkpoints.pop(0)
# Do not delete the file if we keep_checkpoint_every_n_hours is set and we
# have reached N hours of training.
should_keep = p[1] > self._next_checkpoint_time
if should_keep:
self._next_checkpoint_time += (
self._keep_checkpoint_every_n_hours * 3600)
return
# Otherwise delete the files.
for f in gfile.Glob(self._CheckpointFilename(p)):
try:
gfile.Remove(f)
except OSError as e:
logging.warning("Ignoring: %s", str(e))
def as_saver_def(self):
"""Generates a `SaverDef` representation of this saver.
Returns:
A `SaverDef` proto.
"""
return saver_pb2.SaverDef(
filename_tensor_name=self._filename_tensor_name,
save_tensor_name=self._save_tensor_name,
restore_op_name=self._restore_op_name,
max_to_keep=self._max_to_keep,
keep_checkpoint_every_n_hours=self._keep_checkpoint_every_n_hours,
sharded=self._sharded)
@property
def last_checkpoints(self):
"""List of not-yet-deleted checkpoint filenames.
You can pass any of the returned values to `restore()`.
Returns:
A list of checkpoint filenames, sorted from oldest to newest.
"""
return list(self._CheckpointFilename(p) for p in self._last_checkpoints)
def set_last_checkpoints(self, last_checkpoints):
"""Sets the list of old checkpoint filenames.
Args:
last_checkpoints: A list of checkpoint filenames.
Raises:
AssertionError: If the list of checkpoint filenames has already been set.
"""
assert not self._last_checkpoints
assert isinstance(last_checkpoints, list)
# We use a timestamp of +inf so that this checkpoint will never be
# deleted. This is both safe and backwards compatible to a previous
# version of the code which used s[1] as the "timestamp".
self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]
def save(self, sess, save_path, global_step=None, latest_filename=None):
"""Saves variables.
This method runs the ops added by the constructor for saving variables.
It requires a session in which the graph was launched. The variables to
save must also have been initialized.
The method returns the path of the newly created checkpoint file. This
path can be passed directly to a call to `restore()`.
Args:
sess: A Session to use to save the variables.
save_path: string. Path to the checkpoint filename. If the saver is
`sharded`, this is the prefix of the sharded checkpoint filename.
global_step: If provided the global step number is appended to
`save_path` to create the checkpoint filename. The optional argument
can be a `Tensor`, a `Tensor` name or an integer.
latest_filename: Optional name for the protocol buffer file that will
contains the list of most recent checkpoint filenames. That file,
kept in the same directory as the checkpoint files, is automatically
managed by the saver to keep track of recent checkpoints. Defaults to
'checkpoint'.
Returns:
A string: path at which the variables were saved. If the saver is
sharded, this string ends with: '-?????-of-nnnnn' where 'nnnnn'
is the number of shards created.
Raises:
TypeError: If `sess` is not a `Session`.
"""
if latest_filename is None:
latest_filename = "checkpoint"
if global_step is not None:
if not isinstance(global_step, compat.integral_types):
global_step = training_util.global_step(sess, global_step)
checkpoint_file = "%s-%d" % (save_path, global_step)
else:
checkpoint_file = save_path
save_path = os.path.dirname(save_path)
if not isinstance(sess, session.SessionInterface):
raise TypeError("'sess' must be a Session; %s" % sess)
model_checkpoint_path = sess.run(
self._save_tensor_name, {self._filename_tensor_name: checkpoint_file})
model_checkpoint_path = compat.as_str(model_checkpoint_path)
self._MaybeDeleteOldCheckpoints(model_checkpoint_path)
update_checkpoint_state(save_path, model_checkpoint_path,
self.last_checkpoints, latest_filename)
return model_checkpoint_path
def restore(self, sess, save_path):
"""Restores previously saved variables.
This method runs the ops added by the constructor for restoring variables.
It requires a session in which the graph was launched. The variables to
restore do not have to have been initialized, as restoring is itself a way
to initialize variables.
The `save_path` argument is typically a value previously returned from a
`save()` call, or a call to `latest_checkpoint()`.
Args:
sess: A `Session` to use to restore the parameters.
save_path: Path where parameters were previously saved.
"""
sess.run([self._restore_op_name], {self._filename_tensor_name: save_path})
def latest_checkpoint(checkpoint_dir, latest_filename=None):
"""Finds the filename of latest saved checkpoint file.
Args:
checkpoint_dir: Directory where the variables were saved.
latest_filename: Optional name for the protocol buffer file that
contains the list of most recent checkpoint filenames.
See the corresponding argument to `Saver.save()`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was found.
"""
# Pick the latest checkpoint based on checkpoint state.
ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
if ckpt and ckpt.model_checkpoint_path:
checkpoint_pattern = os.path.join(
checkpoint_dir, ckpt.model_checkpoint_path)
if gfile.Glob(checkpoint_pattern):
return checkpoint_pattern
return None
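# Hedged usage sketch (not part of the original file): restoring the most
# recent checkpoint found in a directory; the directory name and the 'saver'
# and 'sess' objects are assumed to exist already.
#
#   ckpt_path = latest_checkpoint("/tmp/train")
#   if ckpt_path:
#     saver.restore(sess, ckpt_path)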
| apache-2.0 | 8,151,407,964,540,449,000 | 38.041758 | 80 | 0.669022 | false | 4.052932 | true | false | false |