repo_name (stringlengths 6-61) | path (stringlengths 4-230) | copies (stringlengths 1-3) | size (stringlengths 4-6) | text (stringlengths 1.01k-850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
madAsket/levitin_algorithms | src/composition_counting_sort.py | 1 | 1750 | # -*- coding: utf-8 -*-
# sorting an array with the comparison counting method
import random
# [65, 59, 46, 62, 14, 25, 78, 22, 47, 79]
# [7, 5, 3, 6, 0, 2, 8, 1, 4, 9]
def ccs(array):
# Create an array of zero counts, the same size as the original array
cnt = [0 for i in range(len(array))]
print cnt
# The first loop runs over array indices from 0 to len-2 (9 iterations for the 10-element example above)
for i in range(len(array)-1):
print "i",i
# The second loop runs from i+1 of the first loop (i.e. 1, 2, ...) up to len (comparing against the values at all higher indices)
for j in range(i + 1, len(array)):
print "j", j
print "arr i",array[i]
print "arr j",array[j]
# If the value at the higher index is greater than the one at the lower index, increment the higher index's count
if array[i] < array[j]:
cnt[j] += 1
else:
# Otherwise increment the lower index's count
cnt[i] += 1
print cnt
s = [0 for i in range(len(array))]
# Place the values from the old array into the new array, where each value's position is its count.
for i in range(len(array)):
s[cnt[i]] = array[i]
print s
return s
def test_ccs():
# generated_array = [random.randint(1, 100) for i in range(10)]
generated_array = [60, 35, 81, 98, 14, 47]
print generated_array
ccs(generated_array)
test_ccs() | gpl-3.0 | 1,829,407,590,805,079,600 | 31.642857 | 112 | 0.579562 | false | 1.929577 | false | false | false |
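# ----------------------------------------------------------------------
# A minimal, self-contained Python 3 sketch of the same comparison
# counting sort shown above (the original file targets Python 2). The
# function and variable names below are illustrative and are not part of
# the original repository.
def comparison_counting_sort(values):
    # count[i] ends up equal to the number of elements smaller than values[i]
    count = [0] * len(values)
    for i in range(len(values) - 1):
        for j in range(i + 1, len(values)):
            if values[i] < values[j]:
                count[j] += 1
            else:
                count[i] += 1
    result = [None] * len(values)
    for i, c in enumerate(count):
        result[c] = values[i]
    return result

assert comparison_counting_sort([60, 35, 81, 98, 14, 47]) == [14, 35, 47, 60, 81, 98]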
RedFantom/ttkwidgets | ttkwidgets/frames/scrolledframe.py | 1 | 4442 | """
Author: RedFantom
License: GNU GPLv3
Source: This repository
"""
# The following sites were used for reference in the creation of this file:
# http://code.activestate.com/recipes/578894-mousewheel-binding-to-scrolling-area-tkinter-multi/
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
import tkinter as tk
from tkinter import ttk
from ttkwidgets import AutoHideScrollbar
class ScrolledFrame(ttk.Frame):
"""
A frame that sports a vertically oriented scrollbar for scrolling.
:ivar interior: :class:`ttk.Frame` in which to put the widgets to be scrolled with any geometry manager.
"""
def __init__(self, master=None, compound=tk.RIGHT, canvasheight=400,
canvaswidth=400, canvasborder=0, autohidescrollbar=True, **kwargs):
"""
Create a ScrolledFrame.
:param master: master widget
:type master: widget
:param compound: "right" or "left": side the scrollbar should be on
:type compound: str
:param canvasheight: height of the internal canvas
:type canvasheight: int
:param canvaswidth: width of the internal canvas
:type canvaswidth: int
:param canvasborder: border width of the internal canvas
:type canvasborder: int
:param autohidescrollbar: whether to use an :class:`~ttkwidgets.AutoHideScrollbar` or a :class:`ttk.Scrollbar`
:type autohidescrollbar: bool
:param kwargs: keyword arguments passed on to the :class:`ttk.Frame` initializer
"""
ttk.Frame.__init__(self, master, **kwargs)
self.rowconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
if autohidescrollbar:
self._scrollbar = AutoHideScrollbar(self, orient=tk.VERTICAL)
else:
self._scrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
self._canvas = tk.Canvas(self, borderwidth=canvasborder, highlightthickness=0,
yscrollcommand=self._scrollbar.set, width=canvaswidth, height=canvasheight)
self.__compound = compound
self._scrollbar.config(command=self._canvas.yview)
self._canvas.yview_moveto(0)
self.interior = ttk.Frame(self._canvas)
self._interior_id = self._canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
self.interior.bind("<Configure>", self.__configure_interior)
self._canvas.bind("<Configure>", self.__configure_canvas)
self.__grid_widgets()
def __grid_widgets(self):
"""Places all the child widgets in the appropriate positions."""
scrollbar_column = 0 if self.__compound is tk.LEFT else 2
self._canvas.grid(row=0, column=1, sticky="nswe")
self._scrollbar.grid(row=0, column=scrollbar_column, sticky="ns")
def __configure_interior(self, *args):
"""
Private function to configure the interior Frame.
:param args: Tkinter event
"""
# Resize the canvas scrollregion to fit the entire frame
(size_x, size_y) = (self.interior.winfo_reqwidth(), self.interior.winfo_reqheight())
self._canvas.config(scrollregion="0 0 {0} {1}".format(size_x, size_y))
if self.interior.winfo_reqwidth() != self._canvas.winfo_width():
# If the interior Frame is wider than the canvas, automatically resize the canvas to fit the frame
self._canvas.config(width=self.interior.winfo_reqwidth())
def __configure_canvas(self, *args):
"""
Private function to configure the internal Canvas.
Changes the width of the canvas to fit the interior Frame
:param args: Tkinter event
"""
if self.interior.winfo_reqwidth() != self._canvas.winfo_width():
self._canvas.configure(width=self.interior.winfo_reqwidth())
def __mouse_wheel(self, event):
"""
Private function to scroll the canvas view.
:param event: Tkinter event
"""
self._canvas.yview_scroll(-1 * (event.delta // 100), "units")
def resize_canvas(self, height=400, width=400):
"""
Function for the user to resize the internal Canvas widget if desired.
:param height: new height in pixels
:type height: int
:param width: new width in pixels
:type width: int
"""
self._canvas.configure(width=width, height=height)
| gpl-3.0 | -2,579,756,312,360,639,000 | 41.304762 | 118 | 0.644529 | false | 4.034514 | true | false | false |
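# ----------------------------------------------------------------------
# A short usage sketch for the ScrolledFrame above: child widgets go into
# the `interior` frame, not into the ScrolledFrame itself. This assumes
# the ttkwidgets package is installed and exposes ScrolledFrame from
# ttkwidgets.frames; widget texts and sizes are illustrative.
import tkinter as tk
from tkinter import ttk
from ttkwidgets.frames import ScrolledFrame

if __name__ == "__main__":
    root = tk.Tk()
    frame = ScrolledFrame(root, compound=tk.RIGHT, canvasheight=200)
    frame.pack(fill=tk.BOTH, expand=True)
    for i in range(50):
        ttk.Label(frame.interior, text="row {}".format(i)).pack(anchor=tk.W)
    root.mainloop()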
dswd/Swine | shortcutlib.py | 1 | 5570 | ############################################################################
# Copyright (C) 2007 by Dennis Schwerdel, Thomas Schmidt #
# #
# #
# This program is free software; you can redistribute it and or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
import struct, collections
LnkFlags = collections.namedtuple("LnkFlags", ["customIcon", "commandLineArgs", "workingDirectory", "relativePath", "description", "pointsToFileDir", "shellIdList"])
FileAttributes = collections.namedtuple("FileAttributes", ["offline", "compressed", "reparse", "sparse", "temporary", "normal", "ntfsEfs", "archive", "directory", "volumeLabel", "system", "hidden", "readOnly"])
SW_HIDE = 0
SW_NORMAL = 1
SW_SHOWMINIMIZED = 2
SW_SHOWMAXIMIZED = 3
SW_SHOWNOACTIVE = 4
SW_SHOW = 5
SW_MINIMIZE = 6
SW_SHOWMINNOACTIVE = 7
SW_SHOWNA = 8
SW_RESTORE = 9
SW_SHOWDEFAULT = 10
LnkFile = collections.namedtuple("LnkFile", ["lnkFlags", "timeCreate", "timeAccess", "timeModify", "fileLength", "iconIndex", "showWindow", "hotkey", "fileAttributes", "target", "description", "relativePath", "workingDirectory", "commandLineArgs", "customIcon"])
def getBits(byte):
return map(bool,(byte&0x80, byte&0x40, byte&0x20, byte&0x10, byte&0x08, byte&0x04, byte&0x02, byte&0x01))
def winTimeToUnix(time):
return int(time * 0.0000001 - 11644473600)
def readLnkFromFp(fp):
(magic,) = struct.unpack("B3x", fp.read(4))
if magic != 0x4c:
raise Exception("Not valid LNK format")
(guid, lnkFlags) = struct.unpack("<16sB3x", fp.read(20))
lnkFlags = LnkFlags(*(getBits(lnkFlags)[1:]))
if lnkFlags.pointsToFileDir:
(byte1, byte2) = struct.unpack("<2B2x", fp.read(4))
fileAttributes = FileAttributes(*(getBits(byte1)[3:]+getBits(byte2)))
fp.seek(0x1c)
(timeCreate, timeAccess, timeModify) = map(winTimeToUnix, struct.unpack("<3d", fp.read(24)))
(fileLength, iconIndex, showWindow, hotkey) = struct.unpack("<IIBI", fp.read(13))
fp.seek(0x4c)
if lnkFlags.shellIdList:
(itemIdLen,) = struct.unpack("<H", fp.read(2))
itemId = fp.read(itemIdLen)
start = fp.tell()
(structLength, firstOffset, volumeFlags, localVolumeTableOffset, basePathOffset, networkVolumeTableOffset, remainingPathOffset) = struct.unpack("<2IB3x4I", fp.read(28))
onLocalVolume = bool(volumeFlags)
assert firstOffset == 0x1c
if onLocalVolume:
fp.seek(start+localVolumeTableOffset)
(volLength, volType, volSerial, volOffset) = struct.unpack("<IIII", fp.read(16))
assert volOffset == 0x10
fp.seek(start+localVolumeTableOffset+volOffset)
(volumeName, basePathName) = fp.read(remainingPathOffset-(localVolumeTableOffset+volOffset)).rstrip("\x00").split("\x00")
target = basePathName
else:
fp.seek(start+networkVolumeTableOffset)
(length,) = struct.unpack("<I16x", fp.read(20))
volumeName = fp.read(length)
target = volumeName
fp.seek(start+remainingPathOffset)
remainingPath = fp.read(structLength-remainingPathOffset).rstrip("\x00")
target += remainingPath
description = None
if lnkFlags.description:
(length,) = struct.unpack("<H", fp.read(2))
description = fp.read(length*2).decode("UTF-16").rstrip("\x00")
relativePath = None
if lnkFlags.relativePath:
(length,) = struct.unpack("<H", fp.read(2))
relativePath = fp.read(length*2).decode("UTF-16").rstrip("\x00")
workingDirectory = None
if lnkFlags.workingDirectory:
(length,) = struct.unpack("<H", fp.read(2))
workingDirectory = fp.read(length*2).decode("UTF-16").rstrip("\x00")
commandLineArgs = None
if lnkFlags.commandLineArgs:
(length,) = struct.unpack("<H", fp.read(2))
commandLineArgs = fp.read(length*2).decode("UTF-16").rstrip("\x00")
customIcon = None
if lnkFlags.customIcon:
(length,) = struct.unpack("<H", fp.read(2))
customIcon = fp.read(length*2).decode("UTF-16").rstrip("\x00")
return LnkFile(lnkFlags=lnkFlags, timeCreate=timeCreate, timeAccess=timeAccess, timeModify=timeModify, fileLength=fileLength, iconIndex=iconIndex, showWindow=showWindow, hotkey=hotkey, fileAttributes=fileAttributes, target=target, description=description, relativePath=relativePath, workingDirectory=workingDirectory, commandLineArgs=commandLineArgs, customIcon=customIcon)
def readLnk(filename):
with open(filename, "rb") as fp:
return readLnkFromFp(fp)
if __name__ == "__main__":
import sys
print readLnk(sys.argv[1]) | gpl-2.0 | 2,456,863,744,634,209,000 | 49.645455 | 375 | 0.6307 | false | 3.570513 | false | false | false |
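# ----------------------------------------------------------------------
# A self-contained restatement of the two small helpers above, with a
# quick check of each. get_bits() mirrors getBits() (MSB first) and
# win_time_to_unix() mirrors winTimeToUnix() (Windows FILETIME, 100 ns
# ticks since 1601-01-01, converted to a Unix timestamp). The sample
# values are illustrative only.
def get_bits(byte):
    return [bool(byte & m) for m in (0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01)]

def win_time_to_unix(ticks):
    return int(ticks * 0.0000001 - 11644473600)

assert get_bits(0b10100001) == [True, False, True, False, False, False, False, True]
# 2009-07-14 00:00:00 UTC expressed as FILETIME ticks:
assert abs(win_time_to_unix(128920032000000000) - 1247529600) <= 1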
carlosabalde/vcc | vcc/ui.py | 1 | 5651 | # -*- coding: utf-8 -*-
'''
:copyright: (c) 2014 by Carlos Abalde, see AUTHORS.txt for more details.
'''
from __future__ import absolute_import
import datetime
import curses
import threading
class UI(threading.Thread):
KEY_ESC = 27
MIN_WIDTH = 12
INTEGER_FORMAT = '%d'
DECIMAL_FORMAT = '%.5f'
def __init__(self, options):
super(UI, self).__init__()
self.daemon = True
self.__options = options
self.__mutex = threading.Lock()
self.__stopping = threading.Event()
self.__pad = None
def run(self):
# General initializations.
pminrow = 0
pmincolumn = 0
wmessage = ' Waiting for data '
lheader = ' Varnish Custom Counters (name=%s, wsize=%d, nwindows=%d)' % (
self.__options.name if self.__options.name is not None else '-',
self.__options.wsize,
self.__options.nwindows,
)
# Screen initializations.
screen = curses.initscr()
curses.cbreak()
curses.noecho()
screen.keypad(1)
screen.timeout(250)
# Event loop.
while not self.stopping:
# Wait (up to 250 ms) for some user input.
ch = screen.getch()
# Extract current screen dimensions (excluding top bar).
srows, scolumns = screen.getmaxyx()
srows -= 1
# Safely render pad contents.
with self.__mutex:
# Select pad to be rendered.
if self.__pad is None:
wmessage = wmessage[1:] + wmessage[0]
pad = curses.newpad(srows, scolumns)
pad.addstr(
int(srows / 2),
max(int(scolumns / 2 - len(wmessage) / 2), 0),
wmessage, curses.A_REVERSE | curses.A_BOLD)
else:
pad = self.__pad
# Extract pad dimensions, expand & update dimensions.
prows, pcolumns = pad.getmaxyx()
pad.resize(max(srows, prows), max(scolumns, pcolumns))
prows, pcolumns = pad.getmaxyx()
# Check requested action, if any.
if ch == ord('q') or ch == ord('Q') or ch == self.KEY_ESC:
self.stop()
elif ch == curses.KEY_RESIZE:
pminrow = 0
pmincolumn = 0
elif ch == curses.KEY_UP or ch == curses.KEY_PPAGE:
pminrow = max(pminrow - srows, 0)
elif ch == curses.KEY_DOWN or ch == curses.KEY_NPAGE:
pminrow = min(pminrow + srows, prows - srows)
elif ch == curses.KEY_LEFT:
pmincolumn = max(pmincolumn - scolumns, 0)
elif ch == curses.KEY_RIGHT:
pmincolumn = min(pmincolumn + scolumns, pcolumns - scolumns)
elif ch != -1:
curses.beep()
# Update top bar.
screen.addstr(0, 0, ' ' * scolumns, curses.A_REVERSE)
if len(lheader) < scolumns:
screen.addstr(0, 0, lheader, curses.A_REVERSE | curses.A_BOLD)
rheader = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S') + ' '
if len(lheader) + len(rheader) < scolumns:
screen.addstr(0, scolumns - len(rheader), rheader, curses.A_REVERSE | curses.A_BOLD)
# Render pad.
pad.refresh(pminrow, pmincolumn, 1, 0, srows, scolumns - 1)
# Destroy screen.
curses.nocbreak()
curses.echo()
screen.keypad(0)
curses.endwin()
def update(self, counters):
pad = None
if len(counters) > 0:
# Sort incoming counters (incrementally by counter name).
counters.sort(key=lambda item: item[0])
# Create new pad.
prows = 2 + len(counters)
pcolumns = 1 + len(max([name for (name, values) in counters], key=len)) + 1
pad = curses.newpad(prows, pcolumns)
# Add first column (counter names).
pad.addstr(1, 0, '-' * pcolumns)
for (i, (name, values)) in enumerate(counters):
pad.addstr(2 + i, 1, name)
# Add rest of columns (counter values).
for offset in range(self.__options.nwindows):
# Render column.
column = ['N-%d ' % offset if offset > 0 else 'N ', '']
for (i, (name, values)) in enumerate(counters):
if offset < len(values) and values[offset] is not None:
value = values[offset]
if isinstance(value, (int, long)):
value = self.INTEGER_FORMAT % value
elif isinstance(value, float):
value = self.DECIMAL_FORMAT % value
column.append(value + ' ')
else:
column.append('- ')
width = max(len(max(column, key=len)) + 1, self.MIN_WIDTH)
column[1] = '-' * width
# Add column.
pcolumns += width
pad.resize(prows, pcolumns)
for (i, value) in enumerate(column):
pad.addstr(i, pcolumns - len(value) - 1, value)
# Safely update pad.
with self.__mutex:
self.__pad = pad
def stop(self):
self.__stopping.set()
@property
def stopping(self):
return self.__stopping.isSet()
| bsd-2-clause | -4,528,734,693,311,094,000 | 35.458065 | 104 | 0.488409 | false | 4.091962 | false | false | false |
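# ----------------------------------------------------------------------
# UI.update() above expects `counters` as a list of (name, values) pairs,
# where values[0] belongs to the current window ("N") and values[i] to
# the window i steps back ("N-i"); None marks a window with no data. A
# shape-only illustration with made-up values (no curses needed):
example_counters = [
    ("cache_hit", [1532, 1498, 1511]),
    ("cache_miss", [87, 93, None]),
]
for name, values in sorted(example_counters):
    print(name, values)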
anentropic/django-conditional-aggregates | djconnagg/aggregates.py | 1 | 7441 | import django
from django.db.models import Aggregate, Q
from django.db.models.sql.aggregates import Aggregate as SQLAggregate
DJANGO_MAJOR, DJANGO_MINOR, _, _, _ = django.VERSION
def transform_q(q, query):
"""
Replaces (lookup, value) children of Q with equivalent WhereNode objects.
This is a pre-prep of our Q object, ready for later rendering into SQL.
Modifies in place, no need to return.
(We could do this in render_q, but then we'd have to pass the Query object
from ConditionalAggregate down into SQLConditionalAggregate, which Django
avoids to do in their API so we try and follow their lead here)
"""
for i, child in enumerate(q.children):
if isinstance(child, Q):
transform_q(child, query)
else:
# child is (lookup, value) tuple
where_node = query.build_filter(child)
q.children[i] = where_node
def render_q(q, qn, connection):
"""
Renders the Q object into SQL for the WHEN clause.
Uses as much as possible the Django ORM machinery for SQL generation,
handling table aliases, field quoting, parameter escaping etc.
:param q: Q object representing the filter condition
:param qn: db specific 'quote names' function that was passed into
SQLAggregate.as_sql method by Django
:param connection: Django db connection object that was passed into
SQLAggregate.as_sql method by Django
:returns: (SQL template str, params list) tuple
"""
joinstr = u' {} '.format(q.connector)
conditions = []
params = []
if DJANGO_MAJOR == 1 and DJANGO_MINOR == 7:
# in Django 1.7 WhereNode.as_sql expects `qn` to have a `compile`
# method (i.e not really expecting a quote names function any more
# they are expecting a django.db.models.sql.compiler.SQLCompiler)
try:
qn = qn.__self__
except AttributeError:
pass
for child in q.children:
if isinstance(child, Q):
# recurse
condition, child_params = render_q(child, qn, connection)
conditions.append(u'({})'.format(condition))
params.extend(child_params)
else:
try:
# Django 1.7
child, joins_used = child
except TypeError:
# Django 1.6
pass
# we expect child to be a WhereNode (see transform_q)
condition, child_params = child.as_sql(qn, connection)
params.extend(child_params)
conditions.append(condition)
rendered = u'({})'.format(joinstr.join(conditions))
if q.negated:
rendered = u'NOT {}'.format(rendered)
return rendered, params
class SQLConditionalAggregate(SQLAggregate):
"""
An aggregate like Count, Sum, but whose content is a CASE conditional
Like Django Count() and Sum() it can be used in annotate() and aggregate()
"""
is_ordinal = False
is_computed = False
sql_template = (
'%(function)s('
'CASE WHEN %(when_clause)s THEN %(value)s ELSE %(default)s END'
')'
)
def __init__(self, col, when, source=None,
is_summary=False, **extra):
self.when = when
super(SQLConditionalAggregate, self).__init__(col, source=source,
**extra)
def get_value(self, **kwargs):
return kwargs['field_name']
def as_sql(self, qn, connection):
params = []
if hasattr(self.col, 'as_sql'):
field_name, params = self.col.as_sql(qn, connection)
elif isinstance(self.col, (list, tuple)):
field_name = '.'.join([qn(c) for c in self.col])
else:
field_name = self.col
when_clause, when_params = render_q(
q=self.when,
qn=qn,
connection=connection,
)
params.extend(when_params)
get_val_kwargs = locals()
get_val_kwargs.pop('self')
substitutions = {
'function': self.sql_function,
'when_clause': when_clause,
'value': self.get_value(**get_val_kwargs),
'default': self.default,
}
substitutions.update(self.extra)
return self.sql_template % substitutions, params
class ConditionalAggregate(Aggregate):
"""
Base class for concrete aggregate types
e.g.
ConditionalSum('count', when=Q(stat_type='a', event_type='v'))
First argument is field lookup path, then we expect `when` kwarg
to be a Django Q object representing the filter condition.
"""
SQLClass = None # define on concrete sub-class
def __init__(self, lookup, when, **extra):
self.when = when
super(ConditionalAggregate, self).__init__(lookup, **extra)
def add_to_query(self, query, alias, col, source, is_summary):
# transform simple lookups to WhereNodes:
when = self.when.clone()
transform_q(when, query)
aggregate = self.SQLClass(
col=col,
when=when,
source=source,
is_summary=is_summary,
**self.extra
)
query.aggregates[alias] = aggregate
class ConditionalSum(ConditionalAggregate):
"""
Works like Sum() except only sums rows that match the Q filter.
:param lookup: (as arg) Django __ lookup path to field to sum on
:param when: (as kwarg) a Q object specifying filter condition
Usage:
report = (
Stat.objects
.extra(select={'month': "date_format(time_period, '%%Y-%%m')"})
.values('campaign_id', 'month') # values + annotate = GROUP BY
.annotate(
impressions=ConditionalSum(
'count',
when=Q(stat_type='a', event_type='v')
),
clicks=ConditionalSum(
'count',
when=Q(stat_type='a', event_type='c') & ~Q(detail='e')
)
)
)
"""
name = 'ConditionalSum'
class SQLClass(SQLConditionalAggregate):
sql_function = 'SUM'
is_computed = True
default = 0
class ConditionalCount(ConditionalAggregate):
"""
Works like Count() except only counts rows that match the Q filter.
:param when: (as kwarg) a Q object specifying filter condition
Usage:
report = (
Stat.objects
.extra(select={'month': "date_format(time_period, '%%Y-%%m')"})
.values('campaign_id', 'month') # values + annotate = GROUP BY
.annotate(
impressions=ConditionalCount(
when=Q(stat_type='a', event_type='v')
)
)
)
"""
name = 'ConditionalCount'
def __init__(self, when, **extra):
self.when = when
# NOTE: passing 'id' as the lookup is a bit hacky but Django is
# rigidly expecting a field name here, even though not needed
super(ConditionalAggregate, self).__init__('id', **extra)
class SQLClass(SQLConditionalAggregate):
sql_template = (
'%(function)s('
'CASE WHEN %(when_clause)s THEN %(value)s ELSE %(default)s END'
')'
)
sql_function = 'COUNT'
is_ordinal = True
default = 'NULL'
def get_value(self, **kwargs):
return '1'
| mit | -7,850,309,308,230,748,000 | 30.66383 | 78 | 0.575326 | false | 4.194476 | false | false | false |
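# ----------------------------------------------------------------------
# The aggregates above render to SQL of the general form
#   SUM(CASE WHEN <condition> THEN <column> ELSE <default> END)
# (see sql_template). A self-contained sqlite3 sketch of that pattern;
# the table, column names and data below are illustrative, not part of
# the original project.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE stat (campaign_id INT, stat_type TEXT, event_type TEXT, count INT)")
conn.executemany("INSERT INTO stat VALUES (?, ?, ?, ?)", [
    (1, "a", "v", 10), (1, "a", "c", 3), (1, "b", "v", 99), (2, "a", "v", 7),
])
rows = conn.execute("""
    SELECT campaign_id,
           SUM(CASE WHEN stat_type = 'a' AND event_type = 'v' THEN count ELSE 0 END) AS impressions,
           SUM(CASE WHEN stat_type = 'a' AND event_type = 'c' THEN count ELSE 0 END) AS clicks
    FROM stat GROUP BY campaign_id ORDER BY campaign_id
""").fetchall()
assert rows == [(1, 10, 3), (2, 7, 0)]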
6si/shipwright | shipwright/_lib/cache.py | 1 | 6177 | from __future__ import absolute_import
import os
import sys
import traceback
from docker import errors as d_errors
from requests import exceptions as requests_exceptions
from . import compat, docker, push, tar
def pull(client, *args, **kwargs):
try:
for evt in client.pull(*args, **kwargs):
yield compat.json_loads(evt)
except d_errors.NotFound as e:
yield docker.error(e.explanation)
class PullFailedException(Exception):
pass
class CacheMissException(Exception):
pass
_FAILED = object()
class NoCache(object):
def __init__(self, docker_client):
self.docker_client = docker_client
self._pulled_images = {}
def _pull_cache(self, image):
raise CacheMissException()
yield
def tag(self, targets, tags):
for image in targets:
for tag in tags:
yield docker.tag_image(
self.docker_client,
image,
tag,
)
def push(self, targets, tags):
names_and_tags = set()
for image in targets:
names_and_tags.add((image.name, image.ref))
for tag in tags:
names_and_tags.add((image.name, tag))
for evt in push.do_push(self.docker_client, sorted(names_and_tags)):
yield evt
def build(self, parent_ref, image):
repo = image.name
tag = image.ref
client = self.docker_client
try:
for evt in self._pull_cache(image):
yield evt
except CacheMissException:
pass
else:
return
# pull the parent if it has not been built because Docker-py fails
# to send the correct credentials in the build command.
if parent_ref:
try:
for evt in self._pull(image.parent, parent_ref):
yield evt
except PullFailedException:
pass
build_evts = client.build(
fileobj=tar.mkcontext(parent_ref, image.path),
rm=True,
custom_context=True,
stream=True,
tag='{0}:{1}'.format(image.name, image.ref),
dockerfile=os.path.basename(image.path),
)
for evt in build_evts:
yield compat.json_loads(evt)
self._pulled_images[(repo, tag)] = True
def _pull(self, repo, tag):
already_pulled = self._pulled_images.get((repo, tag), False)
if already_pulled is _FAILED:
raise PullFailedException()
if already_pulled:
return
client = self.docker_client
failed = False
pull_evts = pull(
client,
repository=repo,
tag=tag,
stream=True,
)
for event in pull_evts:
if 'error' in event:
event['warn'] = event['error']
del event['error']
failed = True
yield event
if failed:
self._pulled_images[(repo, tag)] = _FAILED
raise PullFailedException()
self._pulled_images[(repo, tag)] = True
class Cache(NoCache):
def _pull_cache(self, image):
pull_events = self._pull(repo=image.name, tag=image.ref)
try:
for evt in pull_events:
yield evt
except PullFailedException:
raise CacheMissException()
class DirectRegistry(NoCache):
def __init__(self, docker_client, docker_registry):
super(DirectRegistry, self).__init__(docker_client)
self.drc = docker_registry
self._cache = {}
def _get_manifest(self, tag):
name, ref = tag
try:
return self._cache[tag]
except KeyError:
try:
m = self.drc.get_manifest(name, ref)
except requests_exceptions.HTTPError:
return None
else:
self._cache[tag] = m
return m
def _put_manifest(self, tag, manifest):
name, ref = tag
if manifest is None:
msg = 'manifest does not exist, did the image fail to build?'
yield docker.error(msg)
return
try:
self.drc.put_manifest(name, ref, manifest)
except requests_exceptions.HTTPError:
msg = traceback.format_exception(*sys.exc_info())
yield docker.error(msg)
else:
yield {}
def _pull_cache(self, image):
tag = (image.name, image.ref)
if self._get_manifest(tag) is None:
raise CacheMissException()
return
yield
def tag(self, targets, tags):
"""
A noop operation because we can't tag locally, if we don't have the
built images
"""
return
yield
def push(self, targets, tags):
to_push = set()
to_alias = []
for image in targets:
tag = (image.name, image.ref)
manifest = self._get_manifest(tag)
if manifest is not None:
to_alias.append((tag, manifest))
else:
to_push.add(tag)
sorted_to_push = sorted(to_push)
for evt in push.do_push(self.docker_client, sorted_to_push):
yield evt
for tag in sorted_to_push:
manifest = self._get_manifest(tag)
to_alias.append((tag, manifest))
for (name, ref), manifest in to_alias:
for tag in tags:
dest = (name, tag)
extra = {
'event': 'alias',
'old_image': name + ':' + ref,
'repository': name,
'tag': tag,
}
for evt in self._put_manifest(dest, manifest):
evt.update(extra)
yield evt
| apache-2.0 | -5,070,552,965,968,544,000 | 26.699552 | 76 | 0.525174 | false | 4.242445 | false | false | false |
mpoussevin/NMFViz | nmf_viz.py | 1 | 13049 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import random
import logging
import simplejson as json
import re
import glob
import os
import gzip
import struct
import array
import pickle
def load_pgm(filename, byteorder=">"):
"""Return image data from a raw PGM file as numpy array.
Format specification: http://netpbm.sourceforge.net/doc/pgm.html
"""
with open(filename, "rb") as f:
buff = f.read()
try:
header, width, height, maxval = re.search(
b"(^P5\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buff).groups()
except AttributeError:
raise ValueError(u"Not a raw PGM file: '%s'" % filename)
try:
a = np.frombuffer(buff, dtype=u"u1" if int(maxval) < 256 else byteorder + u"u2", count=int(width) * int(height),
offset=len(header)).reshape((int(height), int(width)))
return a
except Exception as e:
logging.warning("Ignoring image in %s for reason %s", filename, str(e))
return None
def load_mnsit(filename):
logging.info("Loading MNIST data from %s", filename)
with gzip.open(filename) as gf:
magic, size, rows, cols = struct.unpack(">IIII", gf.read(16))
if magic != 2051:
raise IOError("Magic number was expected to be <2049> but was <%d>" % magic)
data = array.array("B", gf.read())
data = [np.array(data[i * rows * cols : (i + 1) * rows * cols]) for i in range(size)]
logging.info("Loaded %d images from %s", len(data), filename)
return data, rows, cols, 255.0
def load_cropped_yale(folder):
paths = [_ for _ in glob.glob(os.path.join(folder, "*.pgm"))]
logging.info("Loading %d images in %s", len(paths), folder)
loaded = [load_pgm(f) for f in glob.glob(os.path.join(folder, "*.pgm"))]
loaded = [x for x in loaded if np.any(x, None)]
logging.info("Successfully loaded %d images out of %d", len(loaded), len(paths))
n_rows, n_cols = loaded[0].shape
logging.info("Images dimensions: %d by %d pixels", n_rows, n_cols)
return loaded, n_rows, n_cols, 255.0
def load_cbcl(filename):
logging.info("Loading data from %s", filename)
with open(filename, "r") as f:
n_examples = int(f.readline())
n_features = int(f.readline())
assert n_features == 361, "Expected number of features to be <361> but was <%d>" % n_features
data = [np.array([float(x) for x in line.strip().split()[:-1]]) for line in f]
logging.info("Loaded %d images from %s", n_features, filename)
return data, 19, 19, 1.0
def load_cifar10(folder):
paths = [_ for _ in glob.glob(os.path.join(folder, "data_batch_*.npy"))]
logging.info("Loading %d batches from %s (and converting images to grayscale)", len(paths), folder)
for p in paths:
logging.info("Loading batch: %s", p)
d = np.load(p)
data = [x for x in d]
logging.info("Loaded %d images from %s", len(data), folder)
return data, 32, 32, 255.0
def load_data(conf):
t = conf["type"]
if t == "Cropped Yale":
data, n_rows, n_cols, norm = load_cropped_yale(conf["path"])
elif t == "MNIST":
data, n_rows, n_cols, norm = load_mnsit(conf["path"])
elif t == "CBCL":
data, n_rows, n_cols, norm = load_cbcl(conf["path"])
elif t == "CIFAR-10":
data, n_rows, n_cols, norm = load_cifar10(conf["path"])
else:
raise ValueError("Invalid type of data: %s (expecting 'Cropped Yale', 'MNIST', 'CBCL' or 'CIFAR-10')" % t)
logging.info("Shuffling images...")
random.shuffle(data)
n_images = min(conf["number"], len(data))
logging.info("Converting to flat vectors, keeping %d images...", n_images)
data = np.vstack((x.flatten() for x in data[:conf["number"]])).transpose() / norm
return data, n_rows, n_cols
class NonnegativeMatrixFactorization:
"""
"Abstract" non-negative matrix factorization.
"""
def __init__(self, n_features, n_examples, components, iterations, loss_name, random_seed=0):
self.n_features = n_features
self.n_examples = n_examples
self.components = components
self.iterations = iterations
self.loss_name = loss_name
np.random.seed(random_seed)
self.W = np.random.random((n_features, components))
self.H = np.random.random((components, n_examples))
class EuclideanLeeSeungNonnegativeMatrixFactorization(NonnegativeMatrixFactorization):
"""
Implementation of the update rules for Mean Squared Error loss as in the paper from Lee & Seung:
Algorithms for non-negative matrix factorization (NIPS 2001)
"""
def __init__(self, n_features, n_examples, components, iterations):
NonnegativeMatrixFactorization.__init__(self, n_features, n_examples, components, iterations, "euclidean")
def update_factors(self, V):
self.H *= np.dot(np.transpose(self.W), V) / np.dot(np.dot(np.transpose(self.W), self.W), self.H)
self.W *= np.dot(V, np.transpose(self.H)) / np.dot(self.W, np.dot(self.H, np.transpose(self.H)))
def compute_loss(self, V):
return np.linalg.norm(V - np.dot(self.W, self.H)) ** 2 / self.n_examples
class DivergenceLeeSeungNonnegativeMatrixFactorization(NonnegativeMatrixFactorization):
"""
Implementation of the update rules for divergence loss (linked to Kullback-Leibler divergence) as in the paper from
Lee & Seung: Algorithms for non-negative matrix factorization (NIPS 2001)
"""
def __init__(self, n_features, n_examples, components, iterations):
NonnegativeMatrixFactorization.__init__(self, n_features, n_examples, components, iterations, "divergence")
def update_factors(self, V):
# The [:, None] is a trick to force correct broadcasting for np.divide
self.H *= np.dot(np.transpose(self.W), V / np.dot(self.W, self.H)) / np.sum(self.W, axis=0)[:, None]
self.W *= np.dot(V / np.dot(self.W, self.H), np.transpose(self.H)) / np.sum(self.H, axis=1)
def compute_loss(self, V):
# Compute WH only once.
WH = np.dot(self.W, self.H)
return np.sum(V * np.log(1e-10 + V / WH) - V + WH) / self.n_examples
class SparseHoyerNonnegativeMatrixFactorization(NonnegativeMatrixFactorization):
"""
Implementation of a sparse nonnegative matrix factorization as in the paper from Patrik O. Hoyer:
Non-negative sparse coding (arXiv)
"""
def __init__(self, n_features, n_examples, components, iterations, sparseness, learning_rate, decay):
NonnegativeMatrixFactorization.__init__(self, n_features, n_examples, components, iterations, "sparse")
self.sparseness = sparseness
self.learning_rate = learning_rate
self.decay = decay
self.W = np.where(self.W < 0.5, 0, self.W)
self.H = np.where(self.H < 0.5, 0, self.H)
def update_factors(self, V):
self.H *= np.dot(np.transpose(self.W), V) / (np.dot(np.dot(np.transpose(self.W), self.W), self.H)
+ self.sparseness)
self.W += self.learning_rate * np.dot(V - np.dot(self.W, self.H), self.H.transpose())
self.W = np.maximum(0, self.W)
self.learning_rate *= self.decay
def compute_loss(self, V):
return np.linalg.norm(V - np.dot(self.W, self.H)) ** 2 / self.n_examples
class SparseL2NonnegativeMatrixFactorization(NonnegativeMatrixFactorization):
"""
Own implementation: sparse on H and L2 on W.
"""
def __init__(self, n_features, n_examples, components, iterations, sparseness, l2, learning_rate, decay):
NonnegativeMatrixFactorization.__init__(self, n_features, n_examples, components, iterations, "sparse L2")
self.sparseness = sparseness
self.learning_rate = learning_rate
self.decay = decay
self.l2 = l2
self.W = np.where(self.W < 0.5, 0, self.W)
self.H = np.where(self.H < 0.5, 0, self.H)
def update_factors(self, V):
self.H *= np.dot(np.transpose(self.W), V) / (np.dot(np.dot(np.transpose(self.W), self.W), self.H)
+ self.sparseness)
self.W += self.learning_rate * (np.dot(V - np.dot(self.W, self.H), self.H.transpose()) - self.l2 * self.W)
self.W = np.maximum(0, self.W)
self.learning_rate *= self.decay
def compute_loss(self, V):
return np.linalg.norm(V - np.dot(self.W, self.H)) ** 2 / self.n_examples
def get_model(n_features, n_examples, conf):
t = conf["type"]
k = conf["components"]
i = conf["iterations"]
if t == "euclidean":
logging.info("Creating nonnegative matrix factorization using Euclidean loss")
return EuclideanLeeSeungNonnegativeMatrixFactorization(n_features, n_examples, k, i)
elif t == "divergence":
logging.info("Creating nonnegative matrix factorization using KL-Divergence loss")
return DivergenceLeeSeungNonnegativeMatrixFactorization(n_features, n_examples, k, i)
elif t == "sparse":
logging.info("Creating nonnegative matrix factorization using Hoyer's sparse loss")
s = conf["sparseness"]
l = conf["learning rate"]
d = conf["learning rate decay"]
return SparseHoyerNonnegativeMatrixFactorization(n_features, n_examples, k, i, s, l, d)
elif t == "sparse-l2":
logging.info("Creating nonnegative matrix factorization using own sparse + L2 loss")
s = conf["sparseness"]
l = conf["learning rate"]
d = conf["learning rate decay"]
l2 = conf["l2"]
return SparseL2NonnegativeMatrixFactorization(n_features, n_examples, k, i, s, l2, l, d)
else:
raise ValueError("Invalid NMF type: {0}".format(conf["type"]))
class ProgressViz:
def __init__(self, model, n_rows, n_cols):
plt.ion()
self.n_rows, self.n_cols = n_rows, n_cols
self.n_comp = model.W.shape[1]
self.sub_rows, self.sub_columns = self.determine_subplots()
self.figure, self.axes = plt.subplots(self.sub_rows, self.sub_columns)
self.figure.suptitle(u"Loss and components -- NMF w/ {0}".format(model.loss_name), size=10)
self.ax_loss = self.axes[0, 0]
self.ax_loss.set_title(u"Loss", size=8)
self.lines, = self.ax_loss.plot([], [], u'o')
self.images = []
for i in range(self.sub_rows * self.sub_columns - 1):
sub_i, sub_j = (1 + i) % self.sub_rows, (1 + i) / self.sub_rows
subplot = self.axes[sub_i, sub_j]
if i < self.n_comp:
self.images.append(subplot.imshow(self.prepare_image(model.W[:, i]), cmap=u"Greys"))
subplot.set_title(u"W[:, %d]" % i, size=8)
subplot.set_axis_off()
else:
# Disable empty subplots
subplot.set_visible(False)
self.ax_loss.set_autoscaley_on(True)
self.ax_loss.set_xlim(0, model.iterations)
self.ax_loss.grid()
self.ax_loss.get_xaxis().set_visible(False)
self.ax_loss.get_yaxis().set_visible(False)
def determine_subplots(self):
nb_plots = self.n_comp + 1
int_squared_root = int(np.sqrt(nb_plots))
return int_squared_root, 1 + int(nb_plots / int_squared_root)
def update_draw(self, iterations, losses, W):
# Update loss
self.lines.set_xdata(iterations)
self.lines.set_ydata(losses)
self.ax_loss.relim()
self.ax_loss.autoscale_view()
# Update mat' fact
for i in range(self.n_comp):
self.images[i].set_data(self.prepare_image(W[:, i]))
self.figure.canvas.draw()
self.figure.canvas.flush_events()
def prepare_image(self, vec):
return 1. - vec.reshape((self.n_rows, self.n_cols))
def wait_end(self):
plt.ioff()
plt.show()
def main(configuration):
logging.info("Setting seed for random generator to %d", configuration["seed"])
data_matrix, n_rows, n_cols = load_data(configuration["data"])
random.seed(configuration["seed"])
n_features, n_examples = data_matrix.shape
logging.info("Data matrix dimensions: %d (features) by %d (examples)", n_features, n_examples)
model = get_model(n_features, n_examples, configuration["nmf"])
p_viz = ProgressViz(model, n_rows, n_cols)
iterations, losses = [], []
for i in range(model.iterations):
model.update_factors(data_matrix)
loss = model.compute_loss(data_matrix)
logging.info("Iteration % 4d => loss: %f", i, loss)
losses.append(loss)
iterations.append(i + 1)
p_viz.update_draw(iterations, losses, model.W)
logging.info(u"Final loss: %f", model.compute_loss(data_matrix))
p_viz.wait_end()
if __name__ == u"__main__":
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
with open("conf.json", "r") as cf:
main(json.load(cf))
| mit | 2,806,228,678,834,713,000 | 41.783607 | 120 | 0.617978 | false | 3.316137 | true | false | false |
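# ----------------------------------------------------------------------
# A compact NumPy check of the Lee & Seung multiplicative updates used by
# EuclideanLeeSeungNonnegativeMatrixFactorization above: on random
# non-negative data the squared reconstruction error should never
# increase. Matrix shapes, the seed and the small epsilon guard in the
# denominators are illustrative choices, not from the original script.
import numpy as np

rng = np.random.RandomState(0)
V = rng.rand(30, 20)   # n_features x n_examples
W = rng.rand(30, 5)
H = rng.rand(5, 20)

prev = np.inf
for _ in range(50):
    H *= W.T.dot(V) / (W.T.dot(W).dot(H) + 1e-12)
    W *= V.dot(H.T) / (W.dot(H).dot(H.T) + 1e-12)
    loss = np.linalg.norm(V - W.dot(H)) ** 2
    assert loss <= prev + 1e-6
    prev = loss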
spthaolt/socorro | webapp-django/crashstats/topcrashers/views.py | 1 | 10933 | import datetime
import isodate
from collections import defaultdict
from django import http
from django.conf import settings
from django.shortcuts import render, redirect
from django.utils import timezone
from django.utils.http import urlquote
from session_csrf import anonymous_csrf
from crashstats.crashstats import models
from crashstats.crashstats.decorators import (
check_days_parameter,
pass_default_context,
)
from crashstats.supersearch.form_fields import split_on_operator
from crashstats.supersearch.models import (
SuperSearchUnredacted,
)
def get_date_boundaries(parameters):
"""Return the date boundaries in a set of parameters.
Return a tuple with 2 datetime objects, the first one is the lower bound
date and the second one is the upper bound date.
"""
default_date_range = datetime.timedelta(days=7)
greater_than = None
lower_than = None
if not parameters.get('date'):
lower_than = timezone.now()
greater_than = lower_than - default_date_range
else:
for param in parameters['date']:
value = isodate.parse_datetime(split_on_operator(param)[1])
if (
'<' in param and (
not lower_than or
(lower_than and lower_than > value)
)
):
lower_than = value
if (
'>' in param and (
not greater_than or
(greater_than and greater_than < value)
)
):
greater_than = value
if not lower_than:
# add a lower than that is now
lower_than = timezone.now()
if not greater_than:
# add a greater than that is lower_than minus the date range
greater_than = lower_than - default_date_range
return (greater_than, lower_than)
def get_topcrashers_results(**kwargs):
'''Return the results of a search. '''
results = []
params = kwargs
params['_aggs.signature'] = [
'platform',
'is_garbage_collecting',
'hang_type',
'process_type',
'_histogram.uptime',
]
params['_histogram_interval.uptime'] = 60
# We don't care about no results, only facets.
params['_results_number'] = 0
if params.get('process_type') in ('any', 'all'):
params['process_type'] = None
api = SuperSearchUnredacted()
search_results = api.get(**params)
if search_results['total'] > 0:
results = search_results['facets']['signature']
platforms = models.Platforms().get_all()['hits']
platform_codes = [
x['code'] for x in platforms if x['code'] != 'unknown'
]
for i, hit in enumerate(results):
hit['signature'] = hit['term']
hit['rank'] = i + 1
hit['percent'] = 100.0 * hit['count'] / search_results['total']
# Number of crash per platform.
for platform in platform_codes:
hit[platform + '_count'] = 0
sig_platforms = hit['facets']['platform']
for platform in sig_platforms:
code = platform['term'][:3].lower()
if code in platform_codes:
hit[code + '_count'] = platform['count']
# Number of crashes happening during garbage collection.
hit['is_gc_count'] = 0
sig_gc = hit['facets']['is_garbage_collecting']
for row in sig_gc:
if row['term'].lower() == 't':
hit['is_gc_count'] = row['count']
# Number of plugin crashes.
hit['plugin_count'] = 0
sig_process = hit['facets']['process_type']
for row in sig_process:
if row['term'].lower() == 'plugin':
hit['plugin_count'] = row['count']
# Number of hang crashes.
hit['hang_count'] = 0
sig_hang = hit['facets']['hang_type']
for row in sig_hang:
# Hangs have weird values in the database: a value of 1 or -1
# means it is a hang, a value of 0 or missing means it is not.
if row['term'] in (1, -1):
hit['hang_count'] += row['count']
# Number of startup crashes.
hit['startup_percent'] = 0
sig_startup = hit['facets']['histogram_uptime']
for row in sig_startup:
if row['term'] == 0:
ratio = 1.0 * row['count'] / hit['count']
hit['startup_crash'] = ratio > 0.5
# Run the same query but for the previous date range, so we can
# compare the rankings and show rank changes.
dates = get_date_boundaries(params)
delta = (dates[1] - dates[0]) * 2
params['date'] = [
'>=' + (dates[1] - delta).isoformat(),
'<' + dates[0].isoformat()
]
params['_aggs.signature'] = [
'platform',
]
previous_range_results = api.get(**params)
total = previous_range_results['total']
compare_signatures = {}
if total > 0 and 'signature' in previous_range_results['facets']:
signatures = previous_range_results['facets']['signature']
for i, hit in enumerate(signatures):
compare_signatures[hit['term']] = {
'count': hit['count'],
'rank': i + 1,
'percent': 100.0 * hit['count'] / total
}
for hit in results:
sig = compare_signatures.get(hit['term'])
if sig:
hit['diff'] = hit['percent'] - sig['percent']
hit['rank_diff'] = hit['rank'] - sig['rank']
hit['previous_percent'] = sig['percent']
else:
hit['diff'] = 'new'
hit['rank_diff'] = 0
hit['previous_percent'] = 0
return search_results
@pass_default_context
@anonymous_csrf
@check_days_parameter([1, 3, 7, 14, 28], default=7)
def topcrashers(request, days=None, possible_days=None, default_context=None):
context = default_context or {}
product = request.GET.get('product')
versions = request.GET.get('version')
crash_type = request.GET.get('process_type')
os_name = request.GET.get('platform')
result_count = request.GET.get('_facets_size')
tcbs_mode = request.GET.get('_tcbs_mode')
if not tcbs_mode or tcbs_mode not in ('realtime', 'byday'):
tcbs_mode = 'realtime'
if product not in context['releases']:
raise http.Http404('Unrecognized product')
if not versions:
# :(
# simulate what the nav.js does which is to take the latest version
# for this product.
for release in context['currentversions']:
if release['product'] == product and release['featured']:
url = '%s&version=%s' % (
request.build_absolute_uri(), urlquote(release['version'])
)
return redirect(url)
else:
versions = versions.split(';')
if len(versions) == 1:
context['version'] = versions[0]
release_versions = [x['version'] for x in context['releases'][product]]
if context['version'] not in release_versions:
raise http.Http404('Unrecognized version')
if tcbs_mode == 'realtime':
end_date = datetime.datetime.utcnow().replace(microsecond=0)
elif tcbs_mode == 'byday':
end_date = datetime.datetime.utcnow().replace(
hour=0, minute=0, second=0, microsecond=0
)
if crash_type not in settings.PROCESS_TYPES:
crash_type = 'browser'
context['crash_type'] = crash_type
os_api = models.Platforms()
operating_systems = os_api.get()
if os_name not in (os_['name'] for os_ in operating_systems):
os_name = None
context['os_name'] = os_name
# set the result counts filter in the context to use in
# the template. This way we avoid hardcoding it twice and
# have it defined in one common location.
context['result_counts'] = settings.TCBS_RESULT_COUNTS
if result_count not in context['result_counts']:
result_count = settings.TCBS_RESULT_COUNTS[0]
context['result_count'] = result_count
context['query'] = {
'mode': tcbs_mode,
'end_date': end_date,
'start_date': end_date - datetime.timedelta(days=days),
}
api_results = get_topcrashers_results(
product=product,
version=context['version'],
platform=os_name,
process_type=crash_type,
date=[
'<' + end_date.isoformat(),
'>=' + context['query']['start_date'].isoformat()
],
_facets_size=result_count,
)
tcbs = api_results['facets']['signature']
count_of_included_crashes = 0
signatures = []
for crash in tcbs[:int(result_count)]:
signatures.append(crash['signature'])
count_of_included_crashes += crash['count']
context['number_of_crashes'] = count_of_included_crashes
context['total_percentage'] = api_results['total'] and (
100.0 * count_of_included_crashes / api_results['total']
)
bugs = defaultdict(list)
api = models.Bugs()
if signatures:
for b in api.get(signatures=signatures)['hits']:
bugs[b['signature']].append(b['id'])
for crash in tcbs:
crash_counts = []
# Due to the inconsistencies of OS usage and naming of
# codes and props for operating systems the hacky bit below
# is required. Socorro and the world will be a better place
# once https://bugzilla.mozilla.org/show_bug.cgi?id=790642 lands.
for operating_system in operating_systems:
if operating_system['name'] == 'Unknown':
# not applicable in this context
continue
os_code = operating_system['code'][0:3].lower()
key = '%s_count' % os_code
crash_counts.append([crash[key], operating_system['name']])
crash['correlation_os'] = max(crash_counts)[1]
sig = crash['signature']
if sig in bugs:
if 'bugs' in crash:
crash['bugs'].extend(bugs[sig])
else:
crash['bugs'] = bugs[sig]
if 'bugs' in crash:
crash['bugs'].sort(reverse=True)
context['tcbs'] = tcbs
context['days'] = days
context['possible_days'] = possible_days
context['total_crashing_signatures'] = len(signatures)
context['total_number_of_crashes'] = api_results['total']
context['process_type_values'] = (
x for x in settings.PROCESS_TYPES if x != 'all'
)
context['platform_values'] = settings.DISPLAY_OS_NAMES
return render(request, 'topcrashers/topcrashers.html', context)
| mpl-2.0 | -1,486,760,568,614,625,500 | 32.53681 | 78 | 0.562792 | false | 4.076435 | false | false | false |
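# ----------------------------------------------------------------------
# get_date_boundaries() above works on SuperSearch-style date parameters
# of the form '<ISO-DATE' or '>=ISO-DATE'. split_on_operator is an
# internal helper that is not shown here; the stand-in below only
# illustrates the convention the view relies on and is not the real
# implementation.
import re

def split_on_operator(param):
    match = re.match(r'(<=|>=|<|>|=)?(.*)', param)
    return match.group(1) or '', match.group(2)

assert split_on_operator('>=2016-01-01T00:00:00') == ('>=', '2016-01-01T00:00:00')
assert split_on_operator('<2016-01-08T00:00:00') == ('<', '2016-01-08T00:00:00')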
rkoshak/sensorReporter | gpio/rpi_gpio.py | 1 | 7551 | # Copyright 2020 Richard Koshak
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains RPI GPIO sensors, actuators, and connections.
Classes:
- RpiGpioSensor: Reports on the state of a GPIO Pin.
- RpiGpioActuator: Sets a pin to HIGH or LOW on command.
"""
from time import sleep
from configparser import NoOptionError
import RPi.GPIO as GPIO
from core.sensor import Sensor
from core.actuator import Actuator
from core.utils import parse_values
# Set to use BCM numbering.
GPIO.setmode(GPIO.BCM)
class RpiGpioSensor(Sensor):
"""Publishes the current state of a configured GPIO pin."""
def __init__(self, publishers, params):
"""Initializes the connection to the GPIO pin and if "EventDetection"
if defined and valid, will subscibe fo events. If missing, than it
requires the "Poll" parameter be defined and > 0. By default it will
publish CLOSED/OPEN for 0/1 which can be overridden by the "Values" which
should be a comma separated list of two paameters, the first one is
CLOSED and second one is OPEN.
Parameters:
- "Pin": the GPIO pin in BCM numbering
- "Values": Alternative values to publish for 0 and 1, defaults to
CLOSED and OPEN for 0 and 1 respectively.
- "PUD": Pull up or down setting, if "UP" uses PULL_UP, all other
values result in PULL_DOWN.
- "EventDetection": when set instead of depending on sensor_reporter
to poll it will reliy on the event detection built into the GPIO
library. Valid values are "RISING", "FALLING" and "BOTH". When not
defined "Poll" must be set to a positive value.
"""
super().__init__(publishers, params)
self.pin = int(params("Pin"))
# Allow users to override the 0/1 pin values.
self.values = parse_values(params, ["CLOSED", "OPEN"])
self.log.debug("Configured %s for CLOSED and %s for OPEN", self.values[0], self.values[1])
pud = GPIO.PUD_UP if params("PUD") == "UP" else GPIO.PUD_DOWN
GPIO.setup(self.pin, GPIO.IN, pull_up_down=pud)
# Set up event detection.
try:
event_detection = params("EventDetection")
event_map = {"RISING": GPIO.RISING, "FALLING": GPIO.FALLING, "BOTH": GPIO.BOTH}
if event_detection not in event_map:
self.log.error("Invalid event detection specified: %s, one of RISING,"
" FALLING, BOTH or NONE are the only allowed values. "
"Defaulting to NONE",
event_detection)
event_detection = "NONE"
except NoOptionError:
self.log.info("No event detection specified, falling back to polling")
event_detection = "NONE"
if event_detection != "NONE":
GPIO.add_event_detect(self.pin, event_map[event_detection],
callback=lambda channel: self.check_state())
self.state = GPIO.input(self.pin)
self.destination = params("Destination")
if self.poll < 0 and event_detection == "NONE":
raise ValueError("Event detection is NONE but polling is OFF")
if self.poll > 0 and event_detection != "NONE":
raise ValueError("Event detection is {} but polling is {}"
.format(event_detection, self.poll))
self.log.info("Configured RpiGpioSensor: pin %d on destination %s with PUD %s"
" and event detection %s", self.pin, self.destination, pud,
event_detection)
# We've a first reading so publish it.
self.publish_state()
def check_state(self):
"""Checks the current state of the pin and if it's different from the
last state publishes it. With event detection this method gets called
when the GPIO pin changed states. When polling this method gets called
on each poll.
"""
value = GPIO.input(self.pin)
if value != self.state:
self.log.info("Pin %s changed from %s to %s", self.pin, self.state, value)
self.state = value
self.publish_state()
def publish_state(self):
"""Publishes the current state of the pin."""
msg = self.values[0] if self.state == GPIO.LOW else self.values[1]
self._send(msg, self.destination)
def cleanup(self):
"""Disconnects from the GPIO subsystem."""
GPIO.cleanup()
class RpiGpioActuator(Actuator):
"""Allows for setting a GPIO pin to high or low on command. Also supports
toggling.
"""
def __init__(self, connections, params):
"""Initializes the GPIO subsystem and sets the pin to the InitialState.
If InitialState is not povided in paams it defaults to GPIO.HIGH. If
"Toggle" is defined on any message will result in the pin being set to
HIGH for half a second and then back to LOW.
Parameters:
- "Pin": The GPIO pin in BCM numbering
- "InitialState": The pin state to set when coming online, defaults
to "OFF".
- "Toggle": Optional parameter that when set to "True" causes any
message received to result in setting the pin to HIGH, sleep for
half a second, then back to LOW.
"""
super().__init__(connections, params)
self.pin = int(params("Pin"))
GPIO.setup(self.pin, GPIO.OUT)
out = GPIO.LOW
try:
out = GPIO.HIGH if params("InitialState") == "ON" else GPIO.LOW
except NoOptionError:
pass
GPIO.output(self.pin, out)
self.toggle = bool(params("Toggle"))
self.log.info("Configued RpoGpuiActuator: pin %d on destination %s with "
"toggle %s", self.pin, self.cmd_src, self.toggle)
def on_message(self, msg):
"""Called when the actuator receives a message. If Toggle is not enabled
sets the pin to HIGH if the message is ON and LOW if the message is OFF.
"""
self.log.info("Received command on %s: %s Toggle = %s Pin = %d",
self.cmd_src, msg, self.toggle, self.pin)
# Toggle on then off.
if self.toggle:
self.log.info("Toggling pin %s HIGH to LOW", self.pin)
GPIO.output(self.pin, GPIO.LOW)
sleep(.5)
self.log.info("Toggling pin %s LOW to HIGH", self.pin)
GPIO.output(self.pin, GPIO.HIGH)
# Turn ON/OFF based on the message.
else:
out = None
if msg == "ON":
out = GPIO.HIGH
elif msg == "OFF":
out = GPIO.LOW
if out is None:
self.log.error("Bad command %s", msg)
else:
self.log.info("Setting pin %d to %s", self.pin,
"HIGH" if out == GPIO.HIGH else "LOW")
GPIO.output(self.pin, out)
| apache-2.0 | -4,835,190,813,067,179,000 | 40.489011 | 98 | 0.602039 | false | 4.097124 | false | false | false |
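# ----------------------------------------------------------------------
# The sensor/actuator classes above read their settings through a
# callable params("Key") that raises configparser.NoOptionError for
# missing keys. A stand-in showing that contract; the section name, keys
# and values are examples only, not a complete sensor_reporter
# configuration.
from configparser import NoOptionError

def make_params(section, values):
    def params(key):
        try:
            return values[key]
        except KeyError:
            raise NoOptionError(key, section)
    return params

params = make_params("SensorExample", {"Pin": "17", "PUD": "UP", "EventDetection": "BOTH"})
assert int(params("Pin")) == 17
try:
    params("Poll")
except NoOptionError:
    print("Poll not configured; falling back to event detection")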
iogf/vy | vyapp/plugins/tidy.py | 1 | 1835 | """
Overview
========
Show errors in html files by running Tidy.
External dependencies:
HTML Tidy
Key-Commands
============
Namespace: tidy
Mode: HTML
Event: <Key-h>
Description: Run Tidy on the current html file and display
a dialog window with all encountered errors. When the dialog
window is shown with the errors it is possible to jump to the
error line by pressing <Return>.
Commands
========
Command: html_errors()
Description: Same as the keycommand <Key-h>.
"""
from subprocess import Popen, STDOUT, PIPE
from vyapp.widgets import LinePicker
from vyapp.areavi import AreaVi
from vyapp.plugins import ENV
from vyapp.app import root
from re import findall
import sys
class HtmlChecker(object):
PATH = 'tidy'
def __init__(self, area):
self.area = area
def check(self):
child = Popen([self.PATH, '--show-body-only', '1', '-e', '-quiet',
self.area.filename], stdout=PIPE, stderr=STDOUT,
encoding=self.area.charset)
output = child.communicate()[0]
regex = 'line ([0-9]+) column ([0-9]+) - (.+)'
ranges = findall(regex, output)
ranges = map(lambda ind: (self.area.filename, ind[0], ind[2]), ranges)
sys.stdout.write('Errors:\n%s\n' % output)
self.area.chmode('NORMAL')
if child.returncode:
self.display(ranges)
else:
root.status.set_msg('No errors!')
def display(self, ranges):
root.status.set_msg('Errors were found!' )
options = LinePicker()
options(ranges)
def install(area):
html_checker = HtmlChecker(area)
picker = lambda event: html_checker.check()
area.install('tidy', ('HTML', '<Key-h>', picker))
def html_errors():
html_checker = HtmlChecker(AreaVi.ACTIVE)
html_checker.check()
ENV['html_errors'] = html_errors
| mit | 1,064,562,167,593,228,000 | 22.831169 | 78 | 0.637602 | false | 3.528846 | false | false | false |
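# ----------------------------------------------------------------------
# The plugin above extracts error positions from Tidy's output with the
# regex 'line ([0-9]+) column ([0-9]+) - (.+)'. A self-contained check on
# a sample line (the message text is only representative of Tidy's
# format):
from re import findall

sample = "line 7 column 1 - Warning: missing </ul> before </body>"
assert findall('line ([0-9]+) column ([0-9]+) - (.+)', sample) == [
    ('7', '1', 'Warning: missing </ul> before </body>')
]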
tanmaykm/edx-platform | lms/djangoapps/verify_student/admin.py | 7 | 2230 | # encoding: utf-8
"""
Admin site configurations for verify_student.
"""
from config_models.admin import ConfigurationModelAdmin
from ratelimitbackend import admin
from lms.djangoapps.verify_student.models import (
IcrvStatusEmailsConfiguration,
SkippedReverification,
SoftwareSecurePhotoVerification,
VerificationStatus,
)
class SoftwareSecurePhotoVerificationAdmin(admin.ModelAdmin):
"""
Admin for the SoftwareSecurePhotoVerification table.
"""
list_display = ('id', 'user', 'status', 'receipt_id', 'submitted_at', 'updated_at')
raw_id_fields = ('user', 'reviewing_user')
search_fields = (
'receipt_id', 'user__username'
)
class VerificationStatusAdmin(admin.ModelAdmin):
"""
Admin for the VerificationStatus table.
"""
list_display = ('timestamp', 'user', 'status', 'checkpoint')
readonly_fields = ()
search_fields = ('checkpoint__checkpoint_location', 'user__username')
raw_id_fields = ('user',)
def get_readonly_fields(self, request, obj=None):
"""When editing an existing record, all fields should be read-only.
VerificationStatus records should be immutable; to change the user's
status, create a new record with the updated status and a more
recent timestamp.
"""
if obj:
return self.readonly_fields + ('status', 'checkpoint', 'user', 'response', 'error')
return self.readonly_fields
class SkippedReverificationAdmin(admin.ModelAdmin):
"""Admin for the SkippedReverification table. """
list_display = ('created_at', 'user', 'course_id', 'checkpoint')
raw_id_fields = ('user',)
readonly_fields = ('user', 'course_id')
search_fields = ('user__username', 'course_id', 'checkpoint__checkpoint_location')
def has_add_permission(self, request):
"""Skipped verifications can't be created in Django admin. """
return False
admin.site.register(SoftwareSecurePhotoVerification, SoftwareSecurePhotoVerificationAdmin)
admin.site.register(SkippedReverification, SkippedReverificationAdmin)
admin.site.register(VerificationStatus, VerificationStatusAdmin)
admin.site.register(IcrvStatusEmailsConfiguration, ConfigurationModelAdmin)
| agpl-3.0 | 6,437,314,400,052,727,000 | 33.84375 | 95 | 0.70583 | false | 4.17603 | true | false | false |
leiferikb/bitpop | src/native_client/pnacl/driver/pnacl-abicheck.py | 4 | 1126 | #!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# IMPORTANT NOTE: If you make local mods to this file, you must run:
# % pnacl/build.sh driver
# in order for them to take effect in the scons build. This command
# updates the copy in the toolchain/ tree.
#
from driver_env import env
import driver_tools
import filetype
import pathtools
EXTRA_ENV = {
'ARGS' : '',
}
PATTERNS = [
('(.*)', "env.append('ARGS', $0)"),
]
def main(argv):
env.update(EXTRA_ENV)
driver_tools.ParseArgs(argv, PATTERNS)
args = env.get('ARGS')
input = pathtools.normalize(args[-1])
if filetype.IsPNaClBitcode(input):
env.append('ARGS', '--bitcode-format=pnacl')
driver_tools.Run('"${PNACL_ABICHECK}" ${ARGS}')
  return 0
# Don't just call the binary with -help because most of those options are
# completely useless for this tool.
def get_help(unused_argv):
return """
USAGE: pnacl-abicheck <input bitcode>
If <input bitcode> is -, then standard input will be read.
"""
| gpl-3.0 | 8,104,783,777,869,809,000 | 25.809524 | 73 | 0.693606 | false | 3.254335 | false | false | false |
andrew-lundgren/gwpy | docs/ex2rst.py | 1 | 3425 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Convert GWpy example python files into rst files for sphinx documentation
"""
__author__ = 'Duncan Macleod <[email protected]>'
import sys
import os
import argparse
import re
METADATA = {
'author': 'sectionauthor',
'currentmodule': 'currentmodule',
}
# -----------------------------------------------------------------------------
# parse command line
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('infile', metavar='example.py',
help='python file to convert',)
parser.add_argument('outfile', metavar='example.rst', nargs='?',
help='rst file to write, default: print to screen')
args = parser.parse_args()
# -----------------------------------------------------------------------------
# parse python file
ref = '-'.join(os.path.splitext(args.infile)[0].split(os.path.sep)[-2:])
lines = open(args.infile, 'rb').read().splitlines()
output = []
header = ['.. _example-%s:\n' % ref]
indoc = False
incode = False
reset = True
for i,line in enumerate(lines):
# skip file header
if len(output) == 0 and line.startswith('#'):
continue
# hide lines
if line.endswith('# hide'):
continue
# find block docs
if line.startswith('"""'):
indoc = not indoc
line = line.strip('"')
# skip empty lines not in a block quote
if not line and not indoc:
if output:
output.append('')
continue
# find code
if incode and line.startswith(('"', '#', '__')):
incode = False
output.append('')
# comments
if line.startswith('#'):
output.append(line[2:])
# metadata
elif line.startswith('__'):
key, value = map(lambda x: x.strip(' _="\'').rstrip(' _="\''),
line.split('=', 1))
if key in METADATA:
header.append('.. %s:: %s\n' % (METADATA[key], value))
# block quote
elif indoc:
output.append(line.strip('"').rstrip('"'))
# code
else:
if not incode:
output.extend(('', '.. plot::', ' :include-source:'))
if reset:
output.append(' :context: reset')
reset = False
else:
output.append(' :context:')
output.append('')
output.append(' %s' % line)
incode = True
# end block quote
if line.endswith('"""') and indoc:
output.append('')
indoc = False
if len(output) == 1:
output.append('#'*len(output[0]))
output = header + output
if args.outfile:
with open(args.outfile, 'w') as f:
f.write('\n'.join(output))
else:
print('\n'.join(output))
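# Example invocation (the paths are illustrative, not taken from this repository):
#     python ex2rst.py ../examples/timeseries/plot_gwdata.py docs/examples/plot_gwdata.rst
# Omitting the second argument prints the generated reST to standard output instead.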
| gpl-3.0 | -4,909,058,951,072,356,000 | 26.4 | 79 | 0.565255 | false | 3.896473 | false | false | false |
SolutionsCloud/apidoc | apidoc/factory/config.py | 1 | 1773 | import os
from apidoc.object.config import Config as ConfigObject
from apidoc.service.parser import Parser
from apidoc.service.merger import Merger
from apidoc.service.validator import Validator
from apidoc.lib.util.decorator import add_property
@add_property("validator", Validator)
@add_property("parser", Parser)
@add_property("merger", Merger)
class Config():
"""Create config objects
"""
def load_from_file(self, config_file):
"""Load a config object from a file
"""
merger = self.merger
parser = self.parser
datas = parser.load_from_file(config_file)
self.validator.validate_config(datas)
if datas is None or datas == {}:
config = ConfigObject()
else:
config = merger.merge_configs(ConfigObject(), [datas])
self.fix_all_path(config, os.path.dirname(config_file))
return config
def fix_all_path(self, config, root_path):
"""Fix config's content's relative path by injecting config location
"""
if config["input"]["locations"] is not None:
config["input"]["locations"] = [self.fix_path(x, root_path) for x in config["input"]["locations"]]
if config["output"]["location"] not in ("stdout", ):
config["output"]["location"] = self.fix_path(config["output"]["location"], root_path)
if config["output"]["template"] not in ("default", ):
config["output"]["template"] = self.fix_path(config["output"]["template"], root_path)
def fix_path(self, path, root_path):
"""Fix a relative path
"""
if path is not None:
if not os.path.exists(path):
path = os.path.realpath(os.path.join(root_path, path))
return path
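# Minimal usage sketch (the file name is illustrative): the factory parses the config
# file, validates it against the schema and rewrites relative input/output paths
# against the directory that contains the config file.
#     factory = Config()
#     config = factory.load_from_file("example/apidoc.yml")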
| gpl-3.0 | 7,689,680,059,595,804,000 | 33.764706 | 110 | 0.620981 | false | 3.922566 | true | false | false |
timothycrosley/hug | hug/validate.py | 1 | 2551 | """hug/validate.py
Defines hugs built-in validation methods
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
def all(*validators):
"""Validation only succeeds if all passed in validators return no errors"""
def validate_all(fields):
for validator in validators:
errors = validator(fields)
if errors:
return errors
validate_all.__doc__ = " and ".join(validator.__doc__ for validator in validators)
return validate_all
def any(*validators):
"""If any of the specified validators pass the validation succeeds"""
def validate_any(fields):
errors = {}
for validator in validators:
validation_errors = validator(fields)
if not validation_errors:
return
errors.update(validation_errors)
return errors
validate_any.__doc__ = " or ".join(validator.__doc__ for validator in validators)
return validate_any
def contains_one_of(*fields):
"""Enables ensuring that one of multiple optional fields is set"""
message = "Must contain any one of the following fields: {0}".format(", ".join(fields))
def check_contains(endpoint_fields):
for field in fields:
if field in endpoint_fields:
return
errors = {}
for field in fields:
errors[field] = "one of these must have a value"
return errors
check_contains.__doc__ = message
return check_contains
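# Usage sketch (field names and values are illustrative): each validator takes the
# dict of endpoint fields and returns None when valid or a {field: message} mapping.
#     require_contact = contains_one_of('email', 'phone')
#     require_contact({'name': 'Tim'})              # -> {'email': ..., 'phone': ...}
#     require_contact({'email': '[email protected]'})  # -> None
#     any(contains_one_of('email'), contains_one_of('phone'))  # composed validator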
| mit | -5,882,885,373,735,339,000 | 35.971014 | 112 | 0.698942 | false | 4.66362 | false | false | false |
Go-In/go-coup | web/signaldetect/signals.py | 1 | 3643 | from django.db.models.signals import pre_save, pre_delete, post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.models import User, Group
import datetime
from usermanage.models import Customer
from customermanage.models import Coupon
from storemanage.models import Ticket
from social_django.models import UserSocialAuth
from django.db import models
@receiver(post_save, sender = Customer)
def test_handler(sender, **kwargs):
print(sender)
print("user created")
@receiver(post_save, sender = Coupon)
def coupon_handler(sender, instance, **kwargs):
ticket = instance.ticket
if instance.active:
if 'purchase_all' not in ticket.stat:
ticket.stat['purchase_all'] = 0
ticket.stat['purchase_all'] += 1
if 'purchase_by_date' not in ticket.stat:
ticket.stat['purchase_by_date'] = dict()
today = datetime.date.today().strftime("%Y-%m-%d")
if today not in ticket.stat['purchase_by_date']:
ticket.stat['purchase_by_date'][today] = 0
ticket.stat['purchase_by_date'][today] += 1
else:
if 'use_all' not in ticket.stat:
ticket.stat['use_all'] = 0
ticket.stat['use_all'] += 1
if 'use_by_date' not in ticket.stat:
ticket.stat['use_by_date'] = dict()
today = datetime.date.today().strftime("%Y-%m-%d")
if today not in ticket.stat['use_by_date']:
ticket.stat['use_by_date'][today] = 0
ticket.stat['use_by_date'][today] += 1
ticket.save()
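# After a few coupon saves the ticket.stat JSON field ends up shaped roughly like
# this (values are illustrative):
#     {'purchase_all': 3,
#      'purchase_by_date': {'2017-05-01': 2, '2017-05-02': 1},
#      'use_all': 1,
#      'use_by_date': {'2017-05-02': 1}}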
@receiver(post_save, sender = UserSocialAuth)
def test_social(sender, instance, **kwargs):
# print("HELLO")
# print(instance)
# print(sender)
user = instance.user
# data = {'username':user.username,'email':user.email,'first_name':user.first_name}
print(user.first_name)
print(user.last_name)
groups = list(user.groups.values_list('name', flat=True))
print('HELLO')
print(groups)
check = 'customer' not in groups
print(check)
if 'customer' not in groups:
g = Group.objects.get(name='customer')
g.user_set.add(user)
user.save()
g.save()
customerprofile = Customer(user = user, first_name = user.first_name, last_name = user.last_name)
customerprofile.save()
# if 'customer' in groups:
# g = Group.objects.get(name='customer')
# g.user_set.add(user)
# user.save()
# g.save()
# customerprofile = Customer(user = user, )
# customerprofile.save()
# data = {'username':user.username,'email':user.email}
# if user.groups.filter(name='customer').exists():
# customer = models.Customer.objects.get(user=user)
# data['first_name']=customer.first_name
# data['last_name']=customer.last_name
# return render(request,'usermanage/profile.html', context={'d':data})
# user = request.user
# data = {'username':user.username,'email':user.email}
# if user.groups.filter(name='store').exists():
# store = models.Store.objects.get(user=user)
# data['store_name']=store.store_name
# elif user.groups.filter(name='customer').exists():
# customer = models.Customer.objects.get(user=user)
# data['first_name']=customer.first_name
# data['last_name']=customer.last_name
# return render(request,'usermanage/profile.html', context={'d':data})
# customerprofile = models.Customer(user = user, first_name = data['first_name'], last_name = data['last_name'])
# customerprofile.save()
# return redirect('index:index')
| mit | 1,063,608,146,051,341,400 | 33.046729 | 116 | 0.634642 | false | 3.557617 | false | false | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/ad_service/client.py | 1 | 21192 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.common.types import ad_type_infos
from google.ads.googleads.v6.common.types import custom_parameter
from google.ads.googleads.v6.common.types import final_app_url
from google.ads.googleads.v6.common.types import url_collection
from google.ads.googleads.v6.enums.types import ad_type
from google.ads.googleads.v6.enums.types import device
from google.ads.googleads.v6.enums.types import system_managed_entity_source
from google.ads.googleads.v6.resources.types import ad
from google.ads.googleads.v6.services.types import ad_service
from .transports.base import AdServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdServiceGrpcTransport
class AdServiceClientMeta(type):
"""Metaclass for the AdService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[AdServiceTransport]]
_transport_registry['grpc'] = AdServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[AdServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdServiceClient(metaclass=AdServiceClientMeta):
"""Service to manage ads."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def ad_path(customer_id: str,ad_id: str,) -> str:
"""Return a fully-qualified ad string."""
return "customers/{customer_id}/ads/{ad_id}".format(customer_id=customer_id, ad_id=ad_id, )
@staticmethod
def parse_ad_path(path: str) -> Dict[str,str]:
"""Parse a ad path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/ads/(?P<ad_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AdServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdServiceTransport):
# transport is a AdServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad(self,
request: ad_service.GetAdRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad.Ad:
r"""Returns the requested ad in full detail.
Args:
request (:class:`google.ads.googleads.v6.services.types.GetAdRequest`):
The request object. Request message for
[AdService.GetAd][google.ads.googleads.v6.services.AdService.GetAd].
resource_name (:class:`str`):
Required. The resource name of the ad
to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.resources.types.Ad:
An ad.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a ad_service.GetAdRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_service.GetAdRequest):
request = ad_service.GetAdRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_ad]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
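    # Usage sketch (customer and ad IDs are illustrative; credentials are assumed to
    # be obtained elsewhere):
    #     client = AdServiceClient(credentials=creds)
    #     name = AdServiceClient.ad_path("1234567890", "987654321")
    #     ad = client.get_ad(resource_name=name)
    # The routing header carrying the resource name is attached automatically above.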
def mutate_ads(self,
request: ad_service.MutateAdsRequest = None,
*,
customer_id: str = None,
operations: Sequence[ad_service.AdOperation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_service.MutateAdsResponse:
r"""Updates ads. Operation statuses are returned.
Updating ads is not supported for TextAd,
ExpandedDynamicSearchAd, GmailAd and ImageAd.
Args:
request (:class:`google.ads.googleads.v6.services.types.MutateAdsRequest`):
The request object. Request message for
[AdService.MutateAds][google.ads.googleads.v6.services.AdService.MutateAds].
customer_id (:class:`str`):
Required. The ID of the customer
whose ads are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v6.services.types.AdOperation]`):
Required. The list of operations to
perform on individual ads.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.services.types.MutateAdsResponse:
Response message for an ad mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a ad_service.MutateAdsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_service.MutateAdsRequest):
request = ad_service.MutateAdsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_ads]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('customer_id', request.customer_id),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
__all__ = (
'AdServiceClient',
)
| apache-2.0 | -4,245,362,499,694,073,300 | 42.337423 | 106 | 0.616837 | false | 4.520478 | false | false | false |
chosenone75/Neural-Networks | DUCTextSummary/CNN4DUCSummary.py | 1 | 5220 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 3 10:26:23 2017
@author: chosenone
CNN for DUC document summarization.
Structure:
An embedding layer, followed by a convolutional layer and a max-pooling layer,
and finally a regression layer.
"""
import tensorflow as tf
import numpy as np
class CNN4DUCSummary(object):
def __init__(self,sequence_length,num_classes,vocab_size,embedding_size
,filter_sizes,feature_size,num_filters,word_embedding=None,
fine_tune=False,l2_reg_lambda=0.0):
# placeholders for input,output and dropout
        if fine_tune and word_embedding is None:
            raise ValueError("there must be a copy of the initial word embedding when fine_tune is enabled")
with tf.name_scope(name="input"):
self._input_x = tf.placeholder(tf.int32,[None,sequence_length],name='x')
self._input_y = tf.placeholder(tf.float32,[None,num_classes],name = 'y')
self._keep_prob = tf.placeholder(tf.float32,name = 'keep_prop')
self._features = tf.placeholder(tf.float32,[None,feature_size],name="feature")
# l2 regularization if needed
l2_loss = tf.constant(0.0,tf.float32)
# embedding layer
with tf.device('/cpu:0'),tf.name_scope('embedding'):
# fine-tuning or not
if not fine_tune:
self._embeddings = tf.Variable(tf.random_uniform([vocab_size,embedding_size],-1.0,1.0),
name="embedding",trainable=True)
else:
self._embeddings = tf.Variable(word_embedding,name="embedding-fine-tuned",trainable=True)
self._embedded_words = tf.nn.embedding_lookup(self._embeddings,self._input_x)
self._embedded_words_expanded = tf.expand_dims(self._embedded_words,-1)
# creat a convolution layer and pool layer for each filter size
pooled_output = []
for i,filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# convolution layer
filter_shape = [filter_size,embedding_size,1,num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape,stddev=0.1),name="W")
b = tf.Variable(tf.constant(0.1,shape=[num_filters]))
# convolution operation
conv = tf.nn.conv2d(self._embedded_words_expanded,
W,
strides=[1,1,1,1],
padding="VALID",
name="conv")
# shape of convolution output is [batch_size,sequence_length - filter_size + 1,1,num_filters]
# apply activation function
h = tf.nn.tanh(tf.nn.bias_add(conv,b),name="tanh")
# max-pooling layers
pooled = tf.nn.max_pool(h,
ksize=[1,sequence_length - filter_size + 1,1,1],
strides=[1,1,1,1],
padding="VALID",
name="pool")
# shape of pooled [batch_size,1,1,num_filters]
pooled_output.append(pooled)
# combine all the pooled output
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(pooled_output,axis=3)
filters = tf.reshape(self.h_pool,[-1,len(filter_sizes),num_filters])
filters_max = tf.reshape(tf.reduce_max(filters,axis=1),[-1,num_filters],name="latent_feature")
# features
self.h_pool_flatened = tf.concat([filters_max,self._features],axis=1,name="sentences")
# final scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(name="W",shape=[num_filters + feature_size,num_classes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1,tf.float32,shape=[num_classes],name="b"))
l2_loss += tf.nn.l2_loss(W)
# l2_loss += tf.nn.l2_loss(b) # really?
self._scores = tf.nn.xw_plus_b(self.h_pool_flatened,W,b,name="scores")
# calculate cost-function
with tf.name_scope("loss"):
losses = 1.0 / 2 * tf.reduce_mean(tf.pow((self._scores - self._input_y),2))
#==============================================================================
# losses = 1.0 / 2 * tf.reduce_mean(-(self._input_y * tf.log(tf.clip_by_value(self._scores,1e-10,1.0))
# + (1 - self._input_y) * tf.log(tf.clip_by_value(1 - self._scores,1e-10,1.0))))
#==============================================================================
self._loss = losses + l2_reg_lambda * l2_loss
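#==============================================================================
# Usage sketch (hyperparameters are illustrative): build the graph and minimize
# model._loss, feeding _input_x, _input_y, _features and _keep_prob.
#     model = CNN4DUCSummary(sequence_length=100, num_classes=1, vocab_size=20000,
#                            embedding_size=128, filter_sizes=[1, 2, 3],
#                            feature_size=4, num_filters=100, l2_reg_lambda=0.1)
#==============================================================================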
| gpl-3.0 | -4,465,537,385,845,269,500 | 42.140496 | 126 | 0.499808 | false | 4.037123 | false | false | false |
scdoshi/django-bits | bits/general.py | 1 | 2707 | """
General utilities for django.
"""
###############################################################################
## Imports
###############################################################################
from django.conf import settings
from django.contrib.auth.models import Group, SiteProfileNotAvailable
from django.core.cache import cache
from django.db.models import get_model
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver, Signal
from django.shortcuts import _get_queryset
###############################################################################
## Utils
###############################################################################
def get_or_none(klass, *args, **kwargs):
"""
Uses get() to return an object or None if the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more
than one object is found.
From django-annoying: https://bitbucket.org/offline/django-annoying
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
return None
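# Usage sketch (the group name is illustrative):
#     staff = get_or_none(Group, name='staff')
#     if staff is None:
#         ...  # no such group; no exception handling required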
def get_profile_model():
"""
Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting.
:return: The model that is used as profile.
"""
if (not hasattr(settings, 'AUTH_PROFILE_MODULE') or
not settings.AUTH_PROFILE_MODULE):
raise SiteProfileNotAvailable
profile_mod = get_model(*settings.AUTH_PROFILE_MODULE.split('.'))
if profile_mod is None:
raise SiteProfileNotAvailable
return profile_mod
def get_group(name):
"""
Return group with given name, if it exists. Check cache first.
"""
group = cache.get('bits.general.group_%s' % name)
if not group:
group = Group.objects.get(name=name)
cache.set('bits.general.group_%s' % name, group, 365 * 24 * 60 * 60)
return group
@receiver(post_delete, sender=Group,
dispatch_uid='bits.general.group_post_delete')
@receiver(post_save, sender=Group,
dispatch_uid='bits.general.group_post_save')
def group_post_save_delete(sender, instance, **kwargs):
cache.delete('bits.general.group_%s' % instance.name)
###############################################################################
## Signals
###############################################################################
after_syncdb = Signal()
"""To send after post_syncdb is done. Currently via custom command"""
| bsd-2-clause | -1,473,195,700,488,016,100 | 32.8375 | 79 | 0.573328 | false | 4.580372 | false | false | false |
cjekel/USA_GDP_per_capita_inflation_adjust | plotCountryGNI.py | 1 | 3694 | import numpy as np
import matplotlib.pyplot as plt
years2 = np.array([1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014])
usaGNI = np.array([612178550047.646,646233886826.65,692328219512.945,753294530375.941,824183577234.192,868295290971.962,952033980993.251,1027990251284.03,1098553055567.61,1183038457083.86,1320921418184.74,1548458249174.67,1711839855738.22,1842214711486.27,1958767403397.59,2117456144199.84,2401109359261.26,2751769589536.9,3048093901726.34,3303883972259.98,3297652203866.24,3411202239818.87,3828479505092.12,4164905103485.73,4601500378186.56,5200354088055.45,5765196251790.1,5888830786924.1,6029529322891.06,6164277951121.71,6612706041742.15,6883086506452.91,7302781827892.38,7760854970064.45,8184808773787.28,8558708987900.82,8869581532268.98,9425292191447.05,10178500697503.7,10498594829042.2,10776200783181,11589035965657.3,12790914724399.8,13693955258225.3,14345564947204.5,14651211130474,15002428215985,14740580035992.9,15143137264678.1,15727290871234.6,16501015978642.4,17001290051112.6,17611490812741.3])
# GNI data atlas method from the worldbank http://databank.worldbank.org/data/reports.aspx?source=2&country=USA&series=&period=#
# CPI data from bureau of labor statistics http://data.bls.gov/pdq/SurveyOutputServlet
usaCPI2 = np.array([30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, 232.957, 236.736])
plt.figure()
plt.plot(years2, usaGNI)
plt.xlabel('Year')
plt.ylabel('GNI in Current USD')
plt.grid(True)
plt.show()
# Adjust GNI for 1962 USD
usaGNI1962 = usaGNI / (usaCPI2 / usaCPI2[0])
plt.figure()
plt.plot(years2, usaGNI1962)
plt.xlabel('Year')
plt.ylabel('GNI adjusted for inflation to 1962 USD')
plt.grid(True)
plt.show()
# Adjust GNI for 2014 USD
usaGNI2014 = usaGNI / (usaCPI2 / usaCPI2[-1])
plt.figure()
plt.plot(years2, usaGNI2014)
plt.xlabel('Year')
plt.ylabel('GNI adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# population from world bank
usaPop = np.array([186538000,189242000,191889000,194303000,196560000,198712000,200706000,202677000,205052000,207661000,209896000,211909000,213854000,215973000,218035000,220239000,222585000,225055000,227225000,229466000,231664000,233792000,235825000,237924000,240133000,242289000,244499000,246819000,249623000,252981000,256514000,259919000,263126000,266278000,269394000,272657000,275854000,279040000,282162411,284968955,287625193,290107933,292805298,295516599,298379912,301231207,304093966,306771529,309347057,311721632,314112078,316497531,318857056])
usaGNIpercapita = usaGNI / usaPop
plt.figure()
plt.plot(years2, usaGNIpercapita)
plt.xlabel('Year')
plt.ylabel('GNI per capita in Current USD')
plt.grid(True)
plt.show()
# adjust GNI per Capita to 1962s numbers
usaGNIpercapita1962 = usaGNIpercapita / (usaCPI2 / usaCPI2[0])
plt.figure()
plt.plot(years2, usaGNIpercapita1962)
plt.xlabel('Year')
plt.ylabel('GNI per capita adjusted for inflation to 1962 USD')
plt.grid(True)
plt.show()
# adjust GNI per Capita to 2014s numbers
usaGNIpercapita2014 = usaGNIpercapita / (usaCPI2 / usaCPI2[-1])
plt.figure()
plt.plot(years2, usaGNIpercapita2014)
plt.xlabel('Year')
plt.ylabel('GNI per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
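# Every adjustment above applies the same CPI rebasing formula:
#     value_in_base_year_USD = nominal_value / (CPI_year / CPI_base_year)
# A small helper (illustrative, not used by the plots above) makes that explicit:
def cpi_adjust(nominal, cpi, base_index=0):
    """Rebase a nominal USD series to the dollars of the year at base_index."""
    return nominal / (cpi / cpi[base_index])
# e.g. cpi_adjust(usaGNI, usaCPI2, -1) reproduces usaGNI2014 above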
| mit | -8,321,938,076,944,006,000 | 55.830769 | 911 | 0.784515 | false | 2.153936 | false | false | false |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_8_0/device_environment_monitor_broker.py | 16 | 69932 | from ..broker import Broker
class DeviceEnvironmentMonitorBroker(Broker):
controller = "device_environment_monitors"
def index(self, **kwargs):
"""Lists the available device environment monitors. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonID: The internal NetMRI identifier of Device Environment.
:type DevEnvMonID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonID: The internal NetMRI identifier of Device Environment.
:type DevEnvMonID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which device environment information was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which device environment information was collected.
:type DeviceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device environment monitors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device environment monitor methods. The listed methods will be called on each device environment monitor returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DevEnvMonID
:param sort: The data field(s) to use for sorting the output. Default is DevEnvMonID. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceEnvironmentMonitor. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_environment_monitors: An array of the DeviceEnvironmentMonitor objects that match the specified input criteria.
:rtype device_environment_monitors: Array of DeviceEnvironmentMonitor
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified device environment monitor.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DevEnvMonID: The internal NetMRI identifier of Device Environment.
:type DevEnvMonID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device environment monitor methods. The listed methods will be called on each device environment monitor returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_environment_monitor: The device environment monitor identified by the specified DevEnvMonID.
:rtype device_environment_monitor: DeviceEnvironmentMonitor
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
"""Lists the available device environment monitors matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonChangedCols: The fields that changed between this revision of the record and the previous revision.
:type DevEnvMonChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonChangedCols: The fields that changed between this revision of the record and the previous revision.
:type DevEnvMonChangedCols: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonDescr: The NetMRI-determined description of the device environment monitor.
:type DevEnvMonDescr: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonDescr: The NetMRI-determined description of the device environment monitor.
:type DevEnvMonDescr: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonEndTime: The ending effective time of this record, or empty if still in effect.
:type DevEnvMonEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonEndTime: The ending effective time of this record, or empty if still in effect.
:type DevEnvMonEndTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonHighShutdown: The high value of the shut down process in the device environment monitor.
:type DevEnvMonHighShutdown: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonHighShutdown: The high value of the shut down process in the device environment monitor.
:type DevEnvMonHighShutdown: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonHighWarnVal: The high value of the warning message in the device environment monitor.
:type DevEnvMonHighWarnVal: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonHighWarnVal: The high value of the warning message in the device environment monitor.
:type DevEnvMonHighWarnVal: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonID: The internal NetMRI identifier of Device Environment.
:type DevEnvMonID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonID: The internal NetMRI identifier of Device Environment.
:type DevEnvMonID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonIndex: The index of the device in the device environment.
:type DevEnvMonIndex: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonIndex: The index of the device in the device environment.
:type DevEnvMonIndex: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonLowShutdown: The low value of the shut down process in the device environment monitor.
:type DevEnvMonLowShutdown: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonLowShutdown: The low value of the shut down process in the device environment monitor.
:type DevEnvMonLowShutdown: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonLowWarnVal: The low value of the warning message in the device environment monitor.
:type DevEnvMonLowWarnVal: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonLowWarnVal: The low value of the warning message in the device environment monitor.
:type DevEnvMonLowWarnVal: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonMeasure: The measure of the device environment monitor.
:type DevEnvMonMeasure: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonMeasure: The measure of the device environment monitor.
:type DevEnvMonMeasure: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonStartTime: The starting effective time of this record.
:type DevEnvMonStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonStartTime: The starting effective time of this record.
:type DevEnvMonStartTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonState: The current state of the device in the device environment monitor.
:type DevEnvMonState: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonState: The current state of the device in the device environment monitor.
:type DevEnvMonState: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonStatus: The status of the device in the Device Environment Monitor.
:type DevEnvMonStatus: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonStatus: The status of the device in the Device Environment Monitor.
:type DevEnvMonStatus: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonStatusAlert: The alert status of the device environment monitor.
:type DevEnvMonStatusAlert: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonStatusAlert: The alert status of the device environment monitor.
:type DevEnvMonStatusAlert: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonStatusMessage: The status message of the device environment monitor.
:type DevEnvMonStatusMessage: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonStatusMessage: The status message of the device environment monitor.
:type DevEnvMonStatusMessage: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonTimestamp: The date and time this record was collected or calculated.
:type DevEnvMonTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonTimestamp: The date and time this record was collected or calculated.
:type DevEnvMonTimestamp: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevEnvMonType: The NetMRI-determined monitor type of Device Environment.
:type DevEnvMonType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevEnvMonType: The NetMRI-determined monitor type of Device Environment.
:type DevEnvMonType: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which device environment information was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which device environment information was collected.
:type DeviceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device environment monitors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device environment monitor methods. The listed methods will be called on each device environment monitor returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DevEnvMonID
:param sort: The data field(s) to use for sorting the output. Default is DevEnvMonID. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceEnvironmentMonitor. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device environment monitors, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DevEnvMonChangedCols, DevEnvMonDescr, DevEnvMonEndTime, DevEnvMonHighShutdown, DevEnvMonHighWarnVal, DevEnvMonID, DevEnvMonIndex, DevEnvMonLowShutdown, DevEnvMonLowWarnVal, DevEnvMonMeasure, DevEnvMonStartTime, DevEnvMonState, DevEnvMonStatus, DevEnvMonStatusAlert, DevEnvMonStatusMessage, DevEnvMonTimestamp, DevEnvMonType, DeviceID.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_environment_monitors: An array of the DeviceEnvironmentMonitor objects that match the specified input criteria.
:rtype device_environment_monitors: Array of DeviceEnvironmentMonitor
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
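    # Editorial usage sketch (not part of the original source): assuming
    # `broker` is an already-constructed instance of this class, a search
    # restricted to one device might look like the commented call below.
    # The device id and attribute names used here are illustrative only.
    #
    #   monitors = broker.search(DeviceID=1234, limit=100,
    #                            sort=['DevEnvMonTimestamp'], dir=['desc'])
    #   for mon in monitors:
    #       print(mon.DevEnvMonType, mon.DevEnvMonStatus)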
def find(self, **kwargs):
"""Lists the available device environment monitors matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DevEnvMonChangedCols, DevEnvMonDescr, DevEnvMonEndTime, DevEnvMonHighShutdown, DevEnvMonHighWarnVal, DevEnvMonID, DevEnvMonIndex, DevEnvMonLowShutdown, DevEnvMonLowWarnVal, DevEnvMonMeasure, DevEnvMonStartTime, DevEnvMonState, DevEnvMonStatus, DevEnvMonStatusAlert, DevEnvMonStatusMessage, DevEnvMonTimestamp, DevEnvMonType, DeviceID.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonChangedCols: The operator to apply to the field DevEnvMonChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonChangedCols: If op_DevEnvMonChangedCols is specified, the field named in this input will be compared to the value in DevEnvMonChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonChangedCols must be specified if op_DevEnvMonChangedCols is specified.
:type val_f_DevEnvMonChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonChangedCols: If op_DevEnvMonChangedCols is specified, this value will be compared to the value in DevEnvMonChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonChangedCols must be specified if op_DevEnvMonChangedCols is specified.
:type val_c_DevEnvMonChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonDescr: The operator to apply to the field DevEnvMonDescr. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonDescr: The NetMRI-determined description of the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonDescr: If op_DevEnvMonDescr is specified, the field named in this input will be compared to the value in DevEnvMonDescr using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonDescr must be specified if op_DevEnvMonDescr is specified.
:type val_f_DevEnvMonDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonDescr: If op_DevEnvMonDescr is specified, this value will be compared to the value in DevEnvMonDescr using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonDescr must be specified if op_DevEnvMonDescr is specified.
:type val_c_DevEnvMonDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonEndTime: The operator to apply to the field DevEnvMonEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonEndTime: If op_DevEnvMonEndTime is specified, the field named in this input will be compared to the value in DevEnvMonEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonEndTime must be specified if op_DevEnvMonEndTime is specified.
:type val_f_DevEnvMonEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonEndTime: If op_DevEnvMonEndTime is specified, this value will be compared to the value in DevEnvMonEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonEndTime must be specified if op_DevEnvMonEndTime is specified.
:type val_c_DevEnvMonEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonHighShutdown: The operator to apply to the field DevEnvMonHighShutdown. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonHighShutdown: The high value of the shut down process in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonHighShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonHighShutdown: If op_DevEnvMonHighShutdown is specified, the field named in this input will be compared to the value in DevEnvMonHighShutdown using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonHighShutdown must be specified if op_DevEnvMonHighShutdown is specified.
:type val_f_DevEnvMonHighShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonHighShutdown: If op_DevEnvMonHighShutdown is specified, this value will be compared to the value in DevEnvMonHighShutdown using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonHighShutdown must be specified if op_DevEnvMonHighShutdown is specified.
:type val_c_DevEnvMonHighShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonHighWarnVal: The operator to apply to the field DevEnvMonHighWarnVal. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonHighWarnVal: The high value of the warning message in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonHighWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonHighWarnVal: If op_DevEnvMonHighWarnVal is specified, the field named in this input will be compared to the value in DevEnvMonHighWarnVal using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonHighWarnVal must be specified if op_DevEnvMonHighWarnVal is specified.
:type val_f_DevEnvMonHighWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonHighWarnVal: If op_DevEnvMonHighWarnVal is specified, this value will be compared to the value in DevEnvMonHighWarnVal using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonHighWarnVal must be specified if op_DevEnvMonHighWarnVal is specified.
:type val_c_DevEnvMonHighWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonID: The operator to apply to the field DevEnvMonID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonID: The internal NetMRI identifier of Device Environment. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonID: If op_DevEnvMonID is specified, the field named in this input will be compared to the value in DevEnvMonID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonID must be specified if op_DevEnvMonID is specified.
:type val_f_DevEnvMonID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonID: If op_DevEnvMonID is specified, this value will be compared to the value in DevEnvMonID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonID must be specified if op_DevEnvMonID is specified.
:type val_c_DevEnvMonID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonIndex: The operator to apply to the field DevEnvMonIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonIndex: The index of the device in the device environment. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonIndex: If op_DevEnvMonIndex is specified, the field named in this input will be compared to the value in DevEnvMonIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonIndex must be specified if op_DevEnvMonIndex is specified.
:type val_f_DevEnvMonIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonIndex: If op_DevEnvMonIndex is specified, this value will be compared to the value in DevEnvMonIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonIndex must be specified if op_DevEnvMonIndex is specified.
:type val_c_DevEnvMonIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonLowShutdown: The operator to apply to the field DevEnvMonLowShutdown. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonLowShutdown: The low value of the shut down process in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonLowShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonLowShutdown: If op_DevEnvMonLowShutdown is specified, the field named in this input will be compared to the value in DevEnvMonLowShutdown using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonLowShutdown must be specified if op_DevEnvMonLowShutdown is specified.
:type val_f_DevEnvMonLowShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonLowShutdown: If op_DevEnvMonLowShutdown is specified, this value will be compared to the value in DevEnvMonLowShutdown using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonLowShutdown must be specified if op_DevEnvMonLowShutdown is specified.
:type val_c_DevEnvMonLowShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonLowWarnVal: The operator to apply to the field DevEnvMonLowWarnVal. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonLowWarnVal: The low value of the warning message in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonLowWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonLowWarnVal: If op_DevEnvMonLowWarnVal is specified, the field named in this input will be compared to the value in DevEnvMonLowWarnVal using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonLowWarnVal must be specified if op_DevEnvMonLowWarnVal is specified.
:type val_f_DevEnvMonLowWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonLowWarnVal: If op_DevEnvMonLowWarnVal is specified, this value will be compared to the value in DevEnvMonLowWarnVal using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonLowWarnVal must be specified if op_DevEnvMonLowWarnVal is specified.
:type val_c_DevEnvMonLowWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonMeasure: The operator to apply to the field DevEnvMonMeasure. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonMeasure: The measure of the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonMeasure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonMeasure: If op_DevEnvMonMeasure is specified, the field named in this input will be compared to the value in DevEnvMonMeasure using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonMeasure must be specified if op_DevEnvMonMeasure is specified.
:type val_f_DevEnvMonMeasure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonMeasure: If op_DevEnvMonMeasure is specified, this value will be compared to the value in DevEnvMonMeasure using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonMeasure must be specified if op_DevEnvMonMeasure is specified.
:type val_c_DevEnvMonMeasure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonStartTime: The operator to apply to the field DevEnvMonStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonStartTime: If op_DevEnvMonStartTime is specified, the field named in this input will be compared to the value in DevEnvMonStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonStartTime must be specified if op_DevEnvMonStartTime is specified.
:type val_f_DevEnvMonStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonStartTime: If op_DevEnvMonStartTime is specified, this value will be compared to the value in DevEnvMonStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonStartTime must be specified if op_DevEnvMonStartTime is specified.
:type val_c_DevEnvMonStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonState: The operator to apply to the field DevEnvMonState. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonState: The current state of the device in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonState: If op_DevEnvMonState is specified, the field named in this input will be compared to the value in DevEnvMonState using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonState must be specified if op_DevEnvMonState is specified.
:type val_f_DevEnvMonState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonState: If op_DevEnvMonState is specified, this value will be compared to the value in DevEnvMonState using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonState must be specified if op_DevEnvMonState is specified.
:type val_c_DevEnvMonState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonStatus: The operator to apply to the field DevEnvMonStatus. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonStatus: The status of the device in the Device Environment Monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonStatus: If op_DevEnvMonStatus is specified, the field named in this input will be compared to the value in DevEnvMonStatus using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonStatus must be specified if op_DevEnvMonStatus is specified.
:type val_f_DevEnvMonStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonStatus: If op_DevEnvMonStatus is specified, this value will be compared to the value in DevEnvMonStatus using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonStatus must be specified if op_DevEnvMonStatus is specified.
:type val_c_DevEnvMonStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonStatusAlert: The operator to apply to the field DevEnvMonStatusAlert. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonStatusAlert: The alert status of the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonStatusAlert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonStatusAlert: If op_DevEnvMonStatusAlert is specified, the field named in this input will be compared to the value in DevEnvMonStatusAlert using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonStatusAlert must be specified if op_DevEnvMonStatusAlert is specified.
:type val_f_DevEnvMonStatusAlert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonStatusAlert: If op_DevEnvMonStatusAlert is specified, this value will be compared to the value in DevEnvMonStatusAlert using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonStatusAlert must be specified if op_DevEnvMonStatusAlert is specified.
:type val_c_DevEnvMonStatusAlert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonStatusMessage: The operator to apply to the field DevEnvMonStatusMessage. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonStatusMessage: The status message of the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonStatusMessage: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonStatusMessage: If op_DevEnvMonStatusMessage is specified, the field named in this input will be compared to the value in DevEnvMonStatusMessage using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonStatusMessage must be specified if op_DevEnvMonStatusMessage is specified.
:type val_f_DevEnvMonStatusMessage: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonStatusMessage: If op_DevEnvMonStatusMessage is specified, this value will be compared to the value in DevEnvMonStatusMessage using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonStatusMessage must be specified if op_DevEnvMonStatusMessage is specified.
:type val_c_DevEnvMonStatusMessage: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonTimestamp: The operator to apply to the field DevEnvMonTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonTimestamp: If op_DevEnvMonTimestamp is specified, the field named in this input will be compared to the value in DevEnvMonTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonTimestamp must be specified if op_DevEnvMonTimestamp is specified.
:type val_f_DevEnvMonTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonTimestamp: If op_DevEnvMonTimestamp is specified, this value will be compared to the value in DevEnvMonTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonTimestamp must be specified if op_DevEnvMonTimestamp is specified.
:type val_c_DevEnvMonTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonType: The operator to apply to the field DevEnvMonType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonType: The NetMRI-determined monitor type of Device Environment. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonType: If op_DevEnvMonType is specified, the field named in this input will be compared to the value in DevEnvMonType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonType must be specified if op_DevEnvMonType is specified.
:type val_f_DevEnvMonType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonType: If op_DevEnvMonType is specified, this value will be compared to the value in DevEnvMonType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonType must be specified if op_DevEnvMonType is specified.
:type val_c_DevEnvMonType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which device environment information was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device environment monitors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device environment monitor methods. The listed methods will be called on each device environment monitor returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DevEnvMonID
:param sort: The data field(s) to use for sorting the output. Default is DevEnvMonID. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceEnvironmentMonitor. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_environment_monitors: An array of the DeviceEnvironmentMonitor objects that match the specified input criteria.
:rtype device_environment_monitors: Array of DeviceEnvironmentMonitor
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
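    # Editorial usage sketch (not part of the original source): find() pairs
    # each op_<field> operator with either a constant value (val_c_<field>)
    # or another field name (val_f_<field>). Assuming `broker` is an
    # instance of this class, a hypothetical query for non-normal
    # temperature monitors could be written as:
    #
    #   rows = broker.find(op_DevEnvMonType='=',
    #                      val_c_DevEnvMonType='temperature',
    #                      op_DevEnvMonStatus='<>',
    #                      val_c_DevEnvMonStatus='normal')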
def data_source(self, **kwargs):
"""The collector NetMRI that collected this data record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DevEnvMonID: The internal NetMRI identifier of Device Environment.
:type DevEnvMonID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The collector NetMRI that collected this data record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def device(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DevEnvMonID: The internal NetMRI identifier of Device Environment.
:type DevEnvMonID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
def infradevice(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DevEnvMonID: The internal NetMRI identifier of Device Environment.
:type DevEnvMonID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("infradevice"), kwargs)
| apache-2.0 | 471,652,885,452,120,700 | 54.238547 | 773 | 0.620088 | false | 4.474216 | false | false | false |
turbokongen/home-assistant | homeassistant/components/xbox/sensor.py | 9 | 2594 | """Xbox friends binary sensors."""
from functools import partial
from typing import Dict, List
from homeassistant.core import callback
from homeassistant.helpers.entity_registry import (
async_get_registry as async_get_entity_registry,
)
from homeassistant.helpers.typing import HomeAssistantType
from . import XboxUpdateCoordinator
from .base_sensor import XboxBaseSensorEntity
from .const import DOMAIN
SENSOR_ATTRIBUTES = ["status", "gamer_score", "account_tier", "gold_tenure"]
async def async_setup_entry(hass: HomeAssistantType, config_entry, async_add_entities):
"""Set up Xbox Live friends."""
coordinator: XboxUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id][
"coordinator"
]
update_friends = partial(async_update_friends, coordinator, {}, async_add_entities)
unsub = coordinator.async_add_listener(update_friends)
hass.data[DOMAIN][config_entry.entry_id]["sensor_unsub"] = unsub
update_friends()
class XboxSensorEntity(XboxBaseSensorEntity):
"""Representation of a Xbox presence state."""
@property
def state(self):
"""Return the state of the requested attribute."""
if not self.coordinator.last_update_success:
return None
return getattr(self.data, self.attribute, None)
@callback
def async_update_friends(
coordinator: XboxUpdateCoordinator,
current: Dict[str, List[XboxSensorEntity]],
async_add_entities,
) -> None:
"""Update friends."""
new_ids = set(coordinator.data.presence)
current_ids = set(current)
    # Process new friends, add them to Home Assistant
new_entities = []
for xuid in new_ids - current_ids:
current[xuid] = [
XboxSensorEntity(coordinator, xuid, attribute)
for attribute in SENSOR_ATTRIBUTES
]
new_entities = new_entities + current[xuid]
if new_entities:
async_add_entities(new_entities)
    # Process departed friends, remove them from Home Assistant
for xuid in current_ids - new_ids:
coordinator.hass.async_create_task(
async_remove_entities(xuid, coordinator, current)
)
async def async_remove_entities(
xuid: str,
coordinator: XboxUpdateCoordinator,
current: Dict[str, XboxSensorEntity],
) -> None:
"""Remove friend sensors from Home Assistant."""
registry = await async_get_entity_registry(coordinator.hass)
entities = current[xuid]
for entity in entities:
if entity.entity_id in registry.entities:
registry.async_remove(entity.entity_id)
del current[xuid]
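# Editorial note (not part of the original module): async_update_friends and
# async_remove_entities implement a simple set-difference sync between the
# coordinator's latest presence data and the entities already registered.
# A minimal illustration of the same diffing idea, with made-up ids:
#
#   new_ids = {"xuid-a", "xuid-b"}
#   current_ids = {"xuid-b", "xuid-c"}
#   new_ids - current_ids      # {"xuid-a"}  -> create sensors
#   current_ids - new_ids      # {"xuid-c"}  -> remove sensors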
| apache-2.0 | 5,337,372,128,086,291,000 | 30.253012 | 87 | 0.696608 | false | 3.94825 | false | false | false |
baharev/SDOPT | sdopt/representation/dag_util.py | 1 | 8916 | from __future__ import print_function
from array import array
from collections import defaultdict
import networkx as nx
import six
from ..nodes.types import is_sum_node, is_var_node
from ..nodes.attributes import NodeAttr
def dbg_info(dag, optional_callable=None):
print('-------------------------------------------------------------------')
if optional_callable: optional_callable()
print('Nodes: %d, edges: %d'%(dag.number_of_nodes(),dag.number_of_edges()) )
print('Is DAG?', nx.is_directed_acyclic_graph(dag))
nwcc = nx.number_weakly_connected_components(dag)
print('Weakly connected components:', nwcc)
dbg_pprint_source_sink_types(dag)
print('-------------------------------------------------------------------')
def dbg_pprint_source_sink_types(dag):
source_types = group_node_ids_by_kind(itr_sourcetype_nodeid(dag))
sink_types = group_node_ids_by_kind(itr_sinktype_nodeid( dag))
print('Sources:')
dbg_pprint_kind_nodeids(source_types)
print('Sinks:')
dbg_pprint_kind_nodeids(sink_types)
def dbg_pprint_kind_nodeids(kind_nodeids):
for kind, nodeids in kind_nodeids.items():
count = len(nodeids)
print(' ', kind, nodeids if count <= 20 else '', '(count=%d)' % count)
def group_node_ids_by_kind(itr_kind_nodeid_pairs):
types = defaultdict(list)
for kind, n in itr_kind_nodeid_pairs:
types[kind].append(n)
return types
def itr_sourcetype_nodeid(dag):
return ((get_pretty_type_str(dag, n), n) for n in dag if is_source(dag, n))
def itr_sinktype_nodeid(dag):
return ((get_pretty_type_str(dag, n), n) for n in dag if is_sink(dag, n))
def itr_sink_con_num_nodeid(dag):
'(con_num, node_id) for sinks only; assumes that the problem has been setup'
return ((dag.node[n][NodeAttr.con_num], n) for n in dag if is_sink(dag, n))
def is_source(dag, node_id):
return len(dag.pred[node_id])==0
def is_sink(dag, node_id):
return len(dag.succ[node_id])==0
# FIXME Part of the dispatching mechanism, revise!
def get_pretty_type_str(dag, n):
return dag.node[n][NodeAttr.type] + '_node'
# TODO Would be a nice addition to nx
def iter_attr(G, nbunch, name):
for n in nbunch:
yield n, G.node[n][name]
def itr_var_num(G, var_node_ids):
for n in var_node_ids:
yield n, G.node[n][NodeAttr.var_num]
def itr_sinks(dag, nbunch):
return (n for n in nbunch if is_sink(dag, n))
def itr_sum_nodes(dag):
return (n for n in dag if is_sum_node(dag.node[n]))
def itr_siso_sum_nodes(dag):
return (n for n in itr_sum_nodes(dag) if len(dag.pred[n])==1
and len(dag.succ[n])==1 )
def itr_single_input_nodes(dag, node_ids):
return (n for n in node_ids if len(dag.pred[n])==1)
def get_single_pred(dag, n):
return next(iter(dag.pred[n]))
def get_single_succ(dag, n):
return next(iter(dag.succ[n]))
def deterministic_topological_sort(dag):
# This function is stolen from networkx.algorithms.dag.topological_sort
# made the returned order deterministic by pre-sorting the nodes by their ID
seen = set()
order = []
explored = set()
nbunch = sorted(dag.nodes_iter()) # <-- SORTED
for v in nbunch: # process all vertices in G
if v in explored:
continue
fringe = [v] # nodes yet to look at
while fringe:
w = fringe[-1] # depth first search
if w in explored: # already looked down this branch
fringe.pop()
continue
seen.add(w) # mark as seen
# Check successors for cycles and for new nodes
new_nodes = []
for n in sorted(six.iterkeys(dag[w])): # <-- SORTED
if n not in explored:
if n in seen: #CYCLE !!
raise nx.NetworkXUnfeasible("Graph contains a cycle.")
new_nodes.append(n)
if new_nodes: # Add new_nodes to fringe
fringe.extend(new_nodes)
else: # No new nodes so w is fully explored
explored.add(w)
order.append(w)
fringe.pop() # done considering this node
return list(reversed(order))
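# Editorial usage sketch (not part of the original module): the function above
# follows networkx's topological sort but visits nodes and successors in
# sorted order, so repeated runs on the same DAG give the same ordering.
#
#   import networkx as nx
#   g = nx.DiGraph()
#   g.add_edges_from([(3, 2), (3, 1), (2, 1)])   # 3 -> 2 -> 1, 3 -> 1
#   deterministic_topological_sort(g)            # -> [3, 2, 1]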
def plot(dag):
from matplotlib import pyplot as plt
node_labels = nx.get_node_attributes(dag, NodeAttr.display)
edge_labels = nx.get_edge_attributes(dag, 'weight')
dag_copy = dag.to_directed()
for _, d in dag_copy.nodes_iter(data=True):
d.clear()
# FIXME Why does this try to copy attributes that it cannot?
positions = nx.graphviz_layout(dag_copy, prog='dot')
nx.draw_networkx_edge_labels(dag, positions, edge_labels, rotate=False)
nx.draw_networkx(dag, pos=positions, labels=node_labels, node_size=800)
mng = plt.get_current_fig_manager()
# TODO Post a wrapper to Code Review?
#mng.full_screen_toggle()
mng.resize(1865,1025)
plt.show()
################################################################################
# Respect children order: add_edge, remove_node, remove_edge, reverse_edge
################################################################################
def add_edge(dag, src, dest, attr_dict):
dag.add_edge(src, dest, attr_dict)
dag.node[dest].setdefault(NodeAttr.input_ord, array('l')).append(src)
def reparent(dag, new_parent, node_to_del):
# delete node_to_del and connect all children to new_parent, with edge dict;
# update each child's input order array to contain the new parent
out_edges = dag.edge[node_to_del]
# In case we already deleted the new parent in a previous round; reparent
# would insert it again and that node would have an empty dict
assert new_parent in dag, '{}, {}'.format(new_parent, node_to_del)
assert_source(dag, node_to_del)
remove_node(dag, node_to_del)
for child_id, edge_dict in six.iteritems(out_edges):
dag.add_edge(new_parent, child_id, edge_dict)
replace(dag.node[child_id][NodeAttr.input_ord], node_to_del, new_parent)
def remove_node(dag, n):
d = dag.node[n]
assert NodeAttr.bounds not in d, d
dag.remove_node(n)
def reverse_edge_to_get_def_var(dag, sum_node_id, var_node_id):
# lambda * <var node> + <lin. comb.> + d = bounds
# node id: n+1 n
# <var node> = (-1/lambda) * ( <lin. comb.> + d - bounds)
#
# add the new reversed edge
e = dag[var_node_id][sum_node_id]
e['weight'] = -1.0/e['weight']
add_edge(dag, sum_node_id, var_node_id, e)
# drop the old edge
dag.remove_edge(var_node_id, sum_node_id)
# update the sum node
d = dag.node[sum_node_id]
# d_term -= rhs
    d_term = d.get(NodeAttr.d_term, 0.0) - d[NodeAttr.bounds].l  # l == u == rhs
    d[NodeAttr.d_term] = d_term  # already asserted
del d[NodeAttr.bounds]
d[NodeAttr.input_ord].remove(var_node_id)
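# Editorial worked example for the transformation above (hypothetical
# numbers): for the defining constraint 2*x + y + 3 = 7 (lambda = 2, d = 3,
# rhs = 7) the reversed edge gets weight -1/lambda = -0.5 and the sum node
# keeps d_term = 3 - 7 = -4, i.e. x = -0.5 * (y - 4).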
def replace(arr, old_value, new_value):
for index, item in enumerate(arr):
if item==old_value:
arr[index] = new_value
def add_keep_smaller_value(mapping, key, value):
# mapping[key]=value BUT if key is already present, keeps smaller value
old_value = mapping.get(key, value)
mapping[key] = min(old_value, value)
def assert_source(dag, node_id):
assert is_source(dag, node_id), 'node %d %s' % (node_id, dag.node[node_id])
def assert_CSE_defining_constraints(dag, con_ends, base_vars):
# A constraint (sum) node immediately followed by a defined var node; with
# an edge from the var node to the sum node; and the rhs of the constraint
# is a real number, not an interval.
# Basically: lambda * <var node> + <some sum> + d = bounds
# <N> c 20 145
# <145> b [0,0]: +
# <146> V 20
# <E> 145 146 -1
for n in con_ends:
# check the sum_node
d = dag.node[n]
assert is_sum_node(d), 'expected a sum_node, found: %s' % d
assert NodeAttr.bounds in d,'Should have bounds, node: %s' % d
lb, ub = d[NodeAttr.bounds]
assert lb==ub,'rhs expected to be a constant, node: %s' % d
# check the var_node
assert n+1 in dag,'expected a var_node; not CSE defining constraint: %s'%d
def_var = dag.node[n+1]
assert is_var_node(def_var), 'expected a var_node, found: %s' % def_var
assert n+1 not in base_vars,'expected a defined var, found %s' % def_var
assert NodeAttr.bounds not in def_var, \
'CSEs must not have bounds, found\n %s' % def_var
assert n in dag.edge[n+1],'Nodes not connected:\n %s \n %s'%(d,def_var)
def assert_vars_are_CSEs(dag, var_node_ids, var_num_def_node):
for var_node in var_node_ids:
var_num = dag.node[var_node][NodeAttr.var_num]
assert var_num in var_num_def_node,'var_num: %d' % var_num
| bsd-3-clause | -8,298,086,595,619,204,000 | 38.803571 | 82 | 0.597465 | false | 3.251641 | false | false | false |
emesene/emesene | emesene/e3/papylib/papyon/papyon/gnet/proxy/SOCKS4.py | 6 | 4563 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Johann Prieur <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from abstract import AbstractProxy
from papyon.gnet.io import TCPClient
from papyon.gnet.constants import *
from papyon.gnet.errors import *
from papyon.gnet.parser import DelimiterParser
import gobject
import logging
import struct
__all__ = ['SOCKS4Proxy']
logger = logging.getLogger('papyon.proxy.SOCKS4')
class SOCKS4Proxy(AbstractProxy):
PROTOCOL_VERSION = 4
CONNECT_COMMAND = 1
"""Proxy class used to communicate with SOCKS4 proxies."""
def __init__(self, client, proxy_infos):
assert(proxy_infos.type == 'socks4'), \
"SOCKS4Proxy expects a socks4 proxy description"
# TODO : implement version 4a of the protocol to allow proxy-side name resolution
assert(client.domain == AF_INET), \
"SOCKS4 CONNECT only handles INET address family"
assert(client.type == SOCK_STREAM), \
"SOCKS4 CONNECT only handles SOCK_STREAM"
assert(client.status == IoStatus.CLOSED), \
"SOCKS4Proxy expects a closed client"
AbstractProxy.__init__(self, client, proxy_infos)
self._transport = TCPClient(self._proxy.host, self._proxy.port)
self._transport.connect("notify::status", self._on_transport_status)
self._transport.connect("error", self._on_transport_error)
self._delimiter_parser = DelimiterParser(self._transport)
self._delimiter_parser.delimiter = 8
self._delimiter_parser.connect("received", self._on_proxy_response)
# Opening state methods
def _pre_open(self, io_object=None):
AbstractProxy._pre_open(self)
def _post_open(self):
AbstractProxy._post_open(self)
user = self._proxy.user
proxy_protocol = struct.pack('!BBH', SOCKS4Proxy.PROTOCOL_VERSION,
SOCKS4Proxy.CONNECT_COMMAND, self.port)
for part in self.host.split('.'):
proxy_protocol += struct.pack('B', int(part))
proxy_protocol += user
proxy_protocol += struct.pack('B', 0)
self._delimiter_parser.enable()
self._transport.send(proxy_protocol)
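    # Editorial sketch of the request assembled above (not original code):
    # for a CONNECT to 10.0.0.1:1863 with user "foo" the bytes sent would be
    #   0x04 0x01            version 4, command 1 (CONNECT)
    #   0x07 0x47            destination port 1863, big-endian
    #   0x0a 0x00 0x00 0x01  destination IPv4 address octets
    #   'f' 'o' 'o' 0x00     user id, NUL-terminated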
# Public API
@property
def protocol(self):
return "SOCKS4"
def open(self):
"""Open the connection."""
if not self._configure():
return
self._pre_open()
try:
self._transport.open()
except:
pass
def close(self):
"""Close the connection."""
self._delimiter_parser.disable()
self._client._proxy_closed()
self._transport.close()
def send(self, buffer, callback=None, errback=None):
self._client.send(buffer, callback, errback=None)
# Callbacks
def _on_transport_status(self, transport, param):
if transport.status == IoStatus.OPEN:
self._post_open()
elif transport.status == IoStatus.OPENING:
self._client._proxy_opening(self._transport._transport)
self._status = transport.status
else:
self._status = transport.status
def _on_transport_error(self, transport, error):
self.close()
self.emit("error", error)
def _on_proxy_response(self, parser, response):
version, response_code = struct.unpack('BB', response[0:2])
assert(version == 0)
if self.status == IoStatus.OPENING:
if response_code == 90:
self._delimiter_parser.disable()
self._transport.disable()
self._client._proxy_open()
else:
logger.error("Connection failed (%s)" % response_code)
self.close()
self.emit("error", SOCKS4Error(self, response_code))
return False
gobject.type_register(SOCKS4Proxy)
| gpl-3.0 | -1,452,429,311,636,583,400 | 33.568182 | 89 | 0.635108 | false | 3.992126 | false | false | false |
zenodo/zenodo | zenodo/modules/communities/receivers.py | 1 | 1754 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2021 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Receivers for Zenodo Communities."""
from __future__ import absolute_import, print_function
from .tasks import dispatch_webhook
def send_inclusion_request_webhook(sender, request=None, **kwargs):
"""Signal receiver to send webhooks after a community inclusion request."""
dispatch_webhook.delay(
community_id=str(request.id_community),
record_id=str(request.id_record),
event_type='community.records.inclusion',
)
def send_record_accepted_webhook(
sender, record=None, community=None, **kwargs):
"""Signal receiver to send webhooks on a record accepted in a community."""
dispatch_webhook.delay(
community_id=str(community.id),
record_id=str(record.id),
event_type='community.records.addition',
)
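# Editorial note (not part of the original module): these receivers are meant
# to be connected to the corresponding community signals elsewhere in Zenodo;
# the signal names below are placeholders only, not the real identifiers.
#
#   inclusion_request_signal.connect(send_inclusion_request_webhook)
#   record_accepted_signal.connect(send_record_accepted_webhook)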
| gpl-2.0 | -6,008,738,019,087,902,000 | 35.541667 | 79 | 0.72577 | false | 3.915179 | false | false | false |
oncer/imagemusic | test.py | 1 | 1458 | import random
import tkinter
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog  # required by open_dialog's askopenfilename() call
class App(object):
def __init__(self):
self.root = tkinter.Tk()
self.style = ttk.Style()
available_themes = self.style.theme_names()
random_theme = random.choice(available_themes)
self.style.theme_use(random_theme)
self.root.title(random_theme)
frm = ttk.Frame(self.root)
frm.pack(expand=True, fill='both')
# create a Combobox with themes to choose from
self.combo = ttk.Combobox(frm, values=available_themes)
self.combo.pack(padx=32, pady=8)
# make the Enter key change the style
self.combo.bind('<Return>', self.change_style)
# make a Button to change the style
button = ttk.Button(frm, text='OK')
button['command'] = self.change_style
button.pack(pady=8)
button2 = ttk.Button(frm, text='Test')
button2['command'] = self.open_dialog
button2.pack(pady=8)
def change_style(self, event=None):
"""set the Style to the content of the Combobox"""
content = self.combo.get()
try:
self.style.theme_use(content)
except tkinter.TclError as err:
messagebox.showerror('Error', err)
else:
self.root.title(content)
def open_dialog(self, event=None):
print(tkinter.filedialog.askopenfilename())
app = App()
app.root.mainloop()
| mit | 2,753,733,675,890,375,700 | 30.021277 | 63 | 0.618656 | false | 3.709924 | false | false | false |
shyamalschandra/keras | examples/imdb_lstm.py | 4 | 2097 | import numpy as np
from keras.preprocessing import sequence
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Embedding
from keras.layers.recurrent import LSTM, GRU
from keras.datasets import imdb
'''
Train a LSTM on the IMDB sentiment classification task.
The dataset is actually too small for LSTM to be of any advantage
compared to simpler, much faster methods such as TF-IDF+LogReg.
Notes:
- RNNs are tricky. Choice of batch size is important,
choice of loss and optimizer is critical, etc.
Most configurations won't converge.
- LSTM loss decrease during training can be quite different
from what you see with CNNs/MLPs/etc. It's more or less a sigmoid
instead of an inverse exponential.
'''
max_features=20000
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 16
print "Loading data..."
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features, test_split=0.2)
print len(X_train), 'train sequences'
print len(X_test), 'test sequences'
print "Pad sequences (samples x time)"
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print 'X_train shape:', X_train.shape
print 'X_test shape:', X_test.shape
print 'Build model...'
model = Sequential()
model.add(Embedding(max_features, 256))
model.add(LSTM(256, 128)) # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(Dense(128, 1))
model.add(Activation('sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
print "Train..."
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=10, verbose=1)
score = model.evaluate(X_test, y_test, batch_size=batch_size)
print 'Test score:', score
classes = model.predict_classes(X_test, batch_size=batch_size)
acc = np_utils.accuracy(classes, y_test)
print 'Test accuracy:', acc
| mit | -6,056,320,864,871,329,000 | 32.822581 | 94 | 0.743443 | false | 3.398703 | true | false | false |
AlexKornitzer/radare2-extras | pimp/pimp.py | 1 | 13253 | import r2pipe
import triton
import struct
class R2Plugin(object):
def __init__(self, name, privdata):
self.privdata = privdata
self.name = name
self.command = _r2_plugin_args[0]
self.args = _r2_plugin_args[1:]
self.r2 = r2pipe.open()
bininfo = self.r2.cmdj("ij")["bin"]
self.arch = bininfo["arch"]
self.bits = bininfo["bits"]
self.regs = self.r2.cmd("drl").split()
self.switch_flagspace(name)
self.commands = {}
def get_reg(self, reg):
res = int(self.r2.cmd("dr {}".format(reg)), 16)
return res
def get_regs(self):
regs = {}
for reg in self.regs:
regs[reg] = self.get_reg(reg)
return regs
def get_maps(self):
return self.r2.cmdj("dmj")
def read_mem(self, address, size):
hexdata = self.r2.cmd("p8 {} @ 0x{:X}".format(size, address))
return hexdata.decode('hex')
def write_mem(self, address, data):
self.r2.cmd("wx {} @ 0x{:X}".format(data.encode("hex"), address))
def seek(self, addr=None):
if addr:
self.r2.cmd("s 0x{:x}".format(addr))
return int(self.r2.cmd("s"), 16)
def switch_flagspace(self, name):
self.r2.cmd("fs {}".format(name))
def set_flag(self, section, name, size, address):
name = "{}.{}.{}".format(self.name, section, name)
self.r2.cmd("f {} {} @ {}".format(name, size, address))
def set_comment(self, comment, address=None):
if address:
self.r2.cmd("CC {} @ 0x{:x}".format(comment, address))
else:
self.r2.cmd("CC {}".format(comment))
def r2cmd(self, name):
def dec(func):
self.command = _r2_plugin_args[0]
self.args = _r2_plugin_args[1:]
func.command = name
self.commands[name] = (func)
return dec
def handle(self):
if self.command in self.commands:
return self.commands[self.command](self.privdata, self.args)
print "[*] Unknown command {}".format(self.command)
def integer(self, s):
regs = self.get_regs()
if s in regs:
v = regs[s]
elif s.startswith("0x"):
v = int(s, 16)
else:
v = int(s)
return v
tritonarch = {
"x86": {
32: triton.ARCH.X86,
64: triton.ARCH.X86_64
}
}
class Pimp(object):
def __init__(self, context=None):
self.r2p = R2Plugin("pimp", self)
arch = self.r2p.arch
bits = self.r2p.bits
self.comments = {}
self.arch = tritonarch[arch][bits]
self.inputs = {}
self.regs = {}
triton.setArchitecture(self.arch)
triton.setAstRepresentationMode(triton.AST_REPRESENTATION.PYTHON)
# Hack in order to be able to get triton register ids by name
self.triton_regs = {}
for r in triton.getAllRegisters():
self.triton_regs[r.getName()] = r
if self.arch == triton.ARCH.X86:
self.pcreg = triton.REG.EIP
elif self.arch == triton.ARCH.X86_64:
self.pcreg = triton.REG.RIP
else:
raise(ValueError("Architecture not implemented"))
setattr(self.memoryCaching, "memsolver", self.r2p)
def reset(self):
triton.resetEngines()
triton.clearPathConstraints()
triton.setArchitecture(self.arch)
triton.enableMode(triton.MODE.ALIGNED_MEMORY, True)
triton.enableMode(triton.MODE.ONLY_ON_SYMBOLIZED, True)
triton.addCallback(self.memoryCaching,
triton.CALLBACK.GET_CONCRETE_MEMORY_VALUE)
triton.addCallback(self.constantFolding,
triton.CALLBACK.SYMBOLIC_SIMPLIFICATION)
for r in self.regs:
if r in self.triton_regs:
triton.setConcreteRegisterValue(
triton.Register(self.triton_regs[r], self.regs[r])
)
for m in cache:
triton.setConcreteMemoryAreaValue(m['start'], bytearray(m["data"]))
for address in self.inputs:
self.inputs[address] = triton.convertMemoryToSymbolicVariable(
triton.MemoryAccess(
address,
triton.CPUSIZE.BYTE
)
)
# Triton does not handle class method callbacks, use staticmethod.
@staticmethod
def memoryCaching(mem):
addr = mem.getAddress()
size = mem.getSize()
mapped = triton.isMemoryMapped(addr)
if not mapped:
dump = pimp.memoryCaching.memsolver.read_mem(addr, size)
triton.setConcreteMemoryAreaValue(addr, bytearray(dump))
cache.append({"start": addr, "data": bytearray(dump)})
return
@staticmethod
def constantFolding(node):
if node.isSymbolized():
return node
return triton.ast.bv(node.evaluate(), node.getBitvectorSize())
def get_current_pc(self):
return triton.getConcreteRegisterValue(self.pcreg)
def disassemble_inst(self, pc=None):
_pc = self.get_current_pc()
if pc:
_pc = pc
opcodes = triton.getConcreteMemoryAreaValue(_pc, 16)
# Create the Triton instruction
inst = triton.Instruction()
inst.setOpcodes(opcodes)
inst.setAddress(_pc)
# disassemble instruction
triton.disassembly(inst)
return inst
def inst_iter(self, pc=None):
while True:
inst = self.process_inst()
if inst.getType() == triton.OPCODE.HLT:
break
yield inst
def process_inst(self, pc=None):
_pc = self.get_current_pc()
if pc:
_pc = pc
opcodes = triton.getConcreteMemoryAreaValue(_pc, 16)
# Create the Triton instruction
inst = triton.Instruction()
inst.setOpcodes(opcodes)
inst.setAddress(_pc)
# execute instruction
triton.processing(inst)
return inst
def add_input(self, addr, size):
for offset in xrange(size):
self.inputs[addr + offset] = triton.convertMemoryToSymbolicVariable(
triton.MemoryAccess(
addr + offset,
triton.CPUSIZE.BYTE
)
)
def is_conditional(self, inst):
return inst.getType() in (triton.OPCODE.JAE, triton.OPCODE.JA, triton.OPCODE.JBE, triton.OPCODE.JB, triton.OPCODE.JCXZ, triton.OPCODE.JECXZ, triton.OPCODE.JE, triton.OPCODE.JGE, triton.OPCODE.JG, triton.OPCODE.JLE, triton.OPCODE.JL, triton.OPCODE.JNE, triton.OPCODE.JNO, triton.OPCODE.JNP, triton.OPCODE.JNS, triton.OPCODE.JO, triton.OPCODE.JP, triton.OPCODE.JS)
def symulate(self, stop=None, stop_on_sj=False):
while True:
inst = self.disassemble_inst()
print inst
if inst.getAddress() == stop or inst.getType() == triton.OPCODE.HLT:
return inst.getAddress()
inst = self.process_inst()
isSymbolized = inst.isSymbolized()
if isSymbolized:
for access, ast in inst.getLoadAccess():
if(access.getAddress() in self.inputs):
self.comments[inst.getAddress()] = "symbolized memory: 0x{:x}".format(access.getAddress())
rr = inst.getReadRegisters()
if rr:
reglist = []
for r, ast in rr:
if ast.isSymbolized():
reglist.append(r.getName())
self.comments[inst.getAddress()] = "symbolized regs: {}".format(" ,".join(reglist))
if (stop_on_sj == True and isSymbolized and inst.isControlFlow() and (inst.getType() != triton.OPCODE.JMP)):
return inst.getAddress()
def process_constraint(self, cstr):
global cache
# request a model verifying cstr
model = triton.getModel(cstr)
if not model:
return False
# apply model to memory cache
for m in model:
for address in self.inputs:
if model[m].getId() == self.inputs[address].getId():
nCache = []
for c in cache:
if c["start"] <= address < c["start"] + len(c["data"]):
c["data"][address-c["start"]] = model[m].getValue()
nCache.append(c)
cache = nCache
return True
def build_jmp_constraint(self, pc=None, take=True):
_pc = self.get_current_pc()
if pc:
_pc = pc
inst = self.disassemble_inst(_pc)
if take:
target = inst.getFirstOperand().getValue()
else:
target = _pc + inst.getSize()
pco = triton.getPathConstraints()
cstr = triton.ast.equal(triton.ast.bvtrue(), triton.ast.bvtrue())
for pc in pco:
if pc.isMultipleBranches():
branches = pc.getBranchConstraints()
for branch in branches:
taken = branch["isTaken"]
src = branch["srcAddr"]
dst = branch["dstAddr"]
bcstr = branch["constraint"]
isPreviousBranchConstraint = (src != _pc) and taken
isBranchToTake = src == _pc and dst == target
if isPreviousBranchConstraint or isBranchToTake:
cstr = triton.ast.land(cstr, bcstr)
cstr = triton.ast.assert_(cstr)
return cstr
@staticmethod
def isMapped(addr):
for m in cache:
if m["start"] <= addr < m["start"] + len(m["data"]):
return True
return False
try:
_r2_plugin_args = _r2_plugin_args.split()
except NameError as e:
print "[*] pimp.py cannot be called directly, use pimp_wrapper.py"
exit()
if "cache" not in globals():
cache = []
if "pimp" not in globals():
pimp = Pimp()
def get_byte(address):
for m in cache:
if m["start"] <= address < m["start"] + len(m["data"]):
idx = address - m["start"]
return struct.pack("B", m["data"][idx])
# initialise the Triton context with current r2 state (registers)
@pimp.r2p.r2cmd("init")
def cmd_init(p, a):
p.regs = p.r2p.get_regs()
p.reset()
# continue until address
@pimp.r2p.r2cmd("dcu")
def cmd_until(p, a):
target = p.r2p.integer(a[0])
addr = p.symulate(stop=target, stop_on_sj=True)
assert(addr==target)
p.r2p.seek(addr)
return
# continue until symbolized jump
@pimp.r2p.r2cmd("dcusj")
def cmd_until_symjump(p, a):
addr = p.symulate(stop_on_sj=True)
for caddr in p.comments:
p.r2p.set_comment(p.comments[caddr], caddr)
p.r2p.seek(addr)
p.r2p.set_flag("regs", p.pcreg.getName(), 1, addr)
# go to current jump target
@pimp.r2p.r2cmd("take")
def cmd_take_symjump(p, a):
addr = p.r2p.seek()
inst = p.disassemble_inst(addr)
if not p.is_conditional(inst):
print "error: invalid instruction type"
return
target = inst.getFirstOperand().getValue()
cstr = p.build_jmp_constraint(pc=addr)
if not p.process_constraint(cstr):
print "error: could not resolve constraint"
return
    # reset and execute until target is reached
p.reset()
for inst in p.inst_iter():
if inst.getAddress() == target:
p.r2p.seek(target)
p.r2p.set_flag("regs", p.pcreg.getName(), 1, target)
return
print "error: end of execution"
# avoid current jump target
@pimp.r2p.r2cmd("avoid")
def cmd_avoid_symjump(p, a):
addr = p.r2p.seek()
inst = p.disassemble_inst(addr)
if not p.is_conditional(inst):
print "error: invalid instruction type"
return
target = inst.getAddress() + inst.getSize()
cstr = p.build_jmp_constraint(pc=addr, take=False)
if not p.process_constraint(cstr):
print "error: could not resolve constraint"
return
    # reset and execute until target is reached
p.reset()
for inst in p.inst_iter():
if inst.getAddress() == target:
p.r2p.seek(target)
p.r2p.set_flag("regs", p.pcreg.getName(), 1, target)
return
print "error: end of execution"
@pimp.r2p.r2cmd("symulate")
def cmd_symulate(p, a):
pass
# define symbolized memory
@pimp.r2p.r2cmd("input")
def cmd_symbolize(p, a):
size = p.r2p.integer(a[0])
addr = p.r2p.integer(a[1])
p.add_input(addr, size)
p.reset()
# sync r2 with input generated by triton
@pimp.r2p.r2cmd("sync")
def cmd_sync_input(p, a):
for address in p.inputs:
p.r2p.write_mem(address, get_byte(address))
# reset memory with r2 current state
@pimp.r2p.r2cmd("reset")
def cmd_reset(p, a):
global cache
ncache = []
for m in cache:
addr = m["start"]
size = len(m["data"])
data = p.r2p.read_mem(addr, size)
triton.setConcreteMemoryAreaValue(addr, bytearray(data))
ncache.append({"start": addr, "data": data})
cache = ncache
pimp.r2p.handle()
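# Hedged usage sketch (not part of the original plugin).  The command names
# below are the ones registered above with @pimp.r2p.r2cmd(...); exactly how
# they are invoked from radare2 depends on pimp_wrapper.py and is an
# assumption here.
#
#   init                    # snapshot r2 registers into the Triton context
#   input <size> <addr>     # mark <size> bytes at <addr> as symbolic input
#   dcusj                   # emulate until a symbolized conditional jump
#   take | avoid            # solve constraints to take or skip that branch
#   sync                    # write the solved input bytes back into r2 memory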
| lgpl-3.0 | 6,267,235,369,199,245,000 | 29.678241 | 370 | 0.563571 | false | 3.49868 | false | false | false |
SlashRoot/WHAT | what_apps/utility/forms.py | 1 | 10552 |
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import models
from django.forms.formsets import BaseFormSet
from django.forms.widgets import flatatt
from django.utils.encoding import smart_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from what_apps.people.models import GenericParty
from what_apps.utility.functions import daily_crypt
class JqueryDatePicker(forms.DateField):
def __init__(self, *args, **kwargs):
super(JqueryDatePicker, self).__init__(*args, **kwargs)
self.widget.format = '%m/%d/%Y'
self.widget.attrs.update({'class':'datePicker', 'readonly':'true'})
class RequiredFormSet(BaseFormSet):
def __init__(self, *args, **kwargs):
super(RequiredFormSet, self).__init__(*args, **kwargs)
for form in self.forms:
form.empty_permitted = False
class AutoCompleteWidget(forms.TextInput):
'''
This widget is a little whacky and wild.
What actually happens here is that the widget itself is a hidden field that gets populated with encrypted data by ajax.
Additionally, there is a visible 'lookup' field into which the user types the autocomplete terms of their dreams.
'''
def __init__(self, new_buttons = False, models=None, with_description=False, name_visible_field=False, *args, **kwargs):
super(AutoCompleteWidget, self).__init__(*args, **kwargs)
self.name_visible_field = name_visible_field #We want the visible field to be named (ie, make its way into POST) iff we have set the field in the form as such.
#Let's populate the list of models.
list_of_models = ""
for model_info in models:
try:
counter = 0
                for counter, property in enumerate(model_info): #model info might be a list (if we have more than one model to autocomplete against) or just a ModelBase. It's EAFP, so we'll presume we've been handed a tuple and deal with the other case in the block below.
#If model_info is in fact a list, we're going to loop through it.
if counter == 0: #The first time through, we know we have the model.
model = property
meta = str(model._meta) #Get the model meta, ie people.member
list_of_models += (meta) #Separate the model _meta by comma
else: #For subsequent iterations, we have a property name of the model. We want to add that to the string.
                        if counter == 1: #If this is the second iteration, we are beginning a list of the properties against which we are going to autocomplete.
list_of_models += "__" + property #Add the first property
else:#This is not the first property; it's at least the second. We'll separate these by ampersands.
list_of_models += "&" + property #Add the first property
except TypeError:
model = model_info
meta = str(model._meta) #Get the model meta, ie people.member
list_of_models += (meta) #Separate the model _meta by comma
list_of_models += ","
list_of_models = list_of_models[:-1] #Kill that last trailing comma
self.encrypted_models = list_of_models
if new_buttons:
#They have asked for the little buttons to add new instances.
if not models:
#...but they didn't even have the decency to pass the models to us.
raise RuntimeError('The llamas did not march. You must either specify models for this widget or set new_buttons to False.')
#OK, they gave us the models. Let's give them the plus signs.
self.add_html = ''
for model in models:
#Go through the models and add little plus signs, plus the model name.
try: #Maybe the object in question is in fact a model...
app_name = str(model._meta).split('.')[0]
model_name = str(model._meta).split('.')[1]
except AttributeError: #Or maybe it's a list...
#In which cast we want model[0]
app_name = str(model[0]._meta).split('.')[0]
model_name = str(model[0]._meta).split('.')[1]
#INTERUPPTOSAURUS!
#If it's the user model, I want to force them to the contact form (instead of the auth.user admin page, which doesn't really do much for us, now does it?).
if app_name == 'auth' and model_name == 'user':
add_url = '/contact/new_contact'
model_name = 'contact'
else:
add_url = '/admin/' + app_name + '/' + model_name + '/add'
self.add_html += '<a target="_blank" href="'+ add_url + '" class="addButton"><span class="underText">' + model_name + '</span></a>'
else: #They didn't ask for the buttons.
self.add_html = False
def render(self, name, value=None, attrs=None):
'''
Justin here. I'm actually not sure what the fuck is going on here. Lost in my own code.
'''
final_attrs = self.build_attrs(attrs, name=name)
lookup_attrs = self.build_attrs(attrs)
if value:
final_attrs['value'] = escape(smart_unicode(value))
lookup_attrs['value'] = final_attrs['value'].split('___')[1]
if not self.attrs.has_key('id'):
final_attrs['id'] = 'id_%s' % name
lookup_attrs['id'] = final_attrs['id'] + '_lookup'
lookup_attrs['class'] = 'autocompleteField autocompleteFieldIncomplete'
if self.name_visible_field:
lookup_attrs['name'] = 'lookup_%s' % name
lookup_attrs['elephant_data'] = str(self.encrypted_models)
final_attrs['type'] = 'hidden'
input_html = mark_safe(u'<input%s />' % flatatt(final_attrs))
widget_html = input_html #So far, just input_html
lookup_html = mark_safe(u'<input%s />' % flatatt(lookup_attrs))
widget_html += lookup_html #Now add the hidden_html
if self.add_html:
widget_html += self.add_html #Now add the plus signs
return widget_html
class AutoCompleteField(forms.Field):
'''
Takes a tuple for models, autocompletes against their haystack entires.
'''
def __init__(self, models=None, new_buttons=False, with_description=True, name_visible_field=False, *args, **kwargs):
super(AutoCompleteField, self).__init__(*args, **kwargs)
self.widget = AutoCompleteWidget(new_buttons=new_buttons, models=models, name_visible_field=name_visible_field, with_description=with_description)
def to_python(self, value):
'''
At the moment, this is not particularly useful.
Better will be to actually decode the value from the hidden field and pass it if the field is shown to be valid.
'''
if value in validators.EMPTY_VALUES:
return u''
encrypted_result = value.split('___')[0]
#result = daily_crypt(encrypted_result, decrypt=True)
result = encrypted_result
#This is now copied in utilities.function.get_object_from_string - please deprecate
result_meta = result.split('_')[0] #Now just app and model.
result_id = result.split('_')[1] #....and the ID of the object.
result_app = result_meta.split('.')[0] #Everything before the . is the app name
result_model = result_meta.split('.')[1] #Everything after is the model name
#Get model of object in question
model = ContentType.objects.get(app_label = result_app, model = result_model).model_class()
#POSITIVE IDENTIFICATION CONFIRMED. PROCEED WITH EXPERIMENT.
result_object = model.objects.get(id=result_id)
return result_object
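# A minimal usage sketch (not part of the original module): declaring a form
# with an AutoCompleteField over User and Group, the same pattern used by
# GenericPartyField below.  The form and field names are illustrative only.
class _ExampleUserLookupForm(forms.Form):
    party = AutoCompleteField(
        models=([User, 'first_name', 'last_name', 'username', 'email'], Group),
        new_buttons=False)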
class GenericPartyField(AutoCompleteField):
def __init__(self, new_buttons=True, *args, **kwargs):
super(GenericPartyField, self).__init__(models=([User,'first_name', 'last_name', 'username', 'email'], Group), new_buttons=new_buttons)
def to_python(self, value):
try:
object = super(GenericPartyField, self).to_python(value)
generic_party = GenericParty.objects.get(party=object)
except AttributeError: #Catch Justin's Attribute Error on Generic Party, ensure that the form can't validate
raise ValidationError('GenericPartyField must be a User or a Group')
return generic_party
def validate(self, value):
'''
Formerly "holy cow"
'''
pass
class SimplePartyLookup(forms.Form):
party_lookup = GenericPartyField(new_buttons=False)
class ManyGenericPartyField(AutoCompleteField):
def __init__(self, *args, **kwargs):
super(ManyGenericPartyField, self).__init__(models=(User, Group), new_buttons=True)
def to_python(self, value):
object = super(ManyGenericPartyField, self).to_python(value)
if object:
generic_party = GenericParty.objects.get(party=object)
else:
raise ValidationError("Gotta pass me somethin'")
return [generic_party,]
class MustBeUniqueField(forms.Field):
'''
Takes a string in the form of appname.model__field and ensures that the value is unique for that field.
'''
def __init__(self, field=None, *args, **kwargs):
super(MustBeUniqueField, self).__init__(*args, **kwargs)
encrypted_field = daily_crypt(field) #Encrypt the list with today's daily salt
self.widget.attrs['class'] = 'mustBeUniqueField'
self.widget.attrs['elephant_data'] = str(encrypted_field)
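# Usage sketch (an assumption, not in the original code): the `field` argument
# is the "appname.model__field" string described in the docstring above, e.g.
# a signup form enforcing unique usernames against auth.User.username.
class _ExampleSignupForm(forms.Form):
    username = MustBeUniqueField(field='auth.user__username')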
def get_bool_from_html(piece):
if piece in ['False', 'false', '0', 0]:
return False
else:
return True | mit | 3,383,373,050,029,385,000 | 44.683983 | 272 | 0.594484 | false | 4.237751 | false | false | false |
citizen-stig/pyjtt | setup.py | 1 | 2399 | import sys
import os
from cx_Freeze import setup, Executable
path = ["pyjtt"] + sys.path
icon_path = os.path.join("resources", "icons", "clock.ico")
build_exe_options = {'path': path, 'include_msvcr': True}
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa371847(v=vs.85).aspx
shortcut_table = [
("DesktopShortcut", # Shortcut
"DesktopFolder", # Directory_
"pyJTT", # Name
"TARGETDIR", # Component_
"[TARGETDIR]pyjtt.exe", # Target
None, # Arguments
None, # Description
None, # Hotkey
None, # Icon
None, # IconIndex
None, # ShowCmd
'%APPDATA%\pyjtt' # WkDir
),
("ProgramMenuShortcut", # Shortcut
"ProgramMenuFolder", # Directory_
"pyJTT", # Name
"TARGETDIR", # Component_
"[TARGETDIR]pyjtt.exe", # Target
None, # Arguments
None, # Description
None, # Hotkey
None, # Icon
None, # IconIndex
None, # ShowCmd
'%APPDATA%\pyjtt' # WkDir
)]
# Now create the table dictionary
msi_data = {"Shortcut": shortcut_table}
# Change some default MSI options and specify the
# use of the above defined tables
bdist_msi_options = {'data': msi_data}
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
base = "Win32GUI"
target_app = os.path.join("pyjtt", "app.py")
setup(name="pyjtt",
version="1.2.3",
description="Jira Time Tracker",
maintainer="Nikolay Golub",
maintainer_email="[email protected]",
long_description="Allows track time in JIRA online and manage worklogs",
license="GNU GENERAL PUBLIC LICENSE Version 3",
options={"build_exe": build_exe_options,
"bdist_msi": bdist_msi_options, },
executables=[Executable(target_app,
base=base,
targetName="pyjtt.exe",
icon=icon_path,
shortcutName="pyJTT",
shortcutDir="DesktopFolder")])
| gpl-3.0 | -2,657,565,497,915,762,000 | 34.80597 | 80 | 0.521884 | false | 3.907166 | false | false | false |
sdrogers/ms2ldaviz | ms2ldaviz/basicviz/migrations/0001_initial.py | 1 | 1168 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=32)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Experiment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=128)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='document',
name='experiment',
field=models.ForeignKey(to='basicviz.Experiment', on_delete=models.CASCADE),
preserve_default=True,
),
]
| mit | 8,181,280,632,879,288,000 | 28.948718 | 114 | 0.524829 | false | 4.653386 | false | false | false |
mohabusama/togile | togile/apps/todo/migrations/0001_initial.py | 1 | 6624 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TodoList'
db.create_table(u'todo_todolist', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='parent_todo', unique=True, null=True, to=orm['todo.TodoItem'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'todo', ['TodoList'])
# Adding model 'TodoItem'
db.create_table(u'todo_todoitem', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('todo_list', self.gf('django.db.models.fields.related.ForeignKey')(related_name='todo_list', to=orm['todo.TodoList'])),
('value', self.gf('django.db.models.fields.TextField')()),
('status', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'todo', ['TodoItem'])
def backwards(self, orm):
# Deleting model 'TodoList'
db.delete_table(u'todo_todolist')
# Deleting model 'TodoItem'
db.delete_table(u'todo_todoitem')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'todo.todoitem': {
'Meta': {'object_name': 'TodoItem'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'todo_list': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'todo_list'", 'to': u"orm['todo.TodoList']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'todo.todolist': {
'Meta': {'object_name': 'TodoList'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_todo'", 'unique': 'True', 'null': 'True', 'to': u"orm['todo.TodoItem']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['todo'] | mit | 3,963,344,749,262,621,000 | 65.919192 | 195 | 0.569143 | false | 3.578606 | false | false | false |
desihub/desispec | py/desispec/magnitude.py | 1 | 2904 | """
desispec.magnitude
========================
Broadband flux and magnitudes
"""
import numpy as np
def compute_broadband_flux(spectrum_wave,spectrum_flux,transmission_wave,transmission_value) :
"""
Computes broadband flux
Args:
spectrum_wave: 1D numpy array (Angstrom)
spectrum_flux: 1D numpy array is some input density unit, same size as spectrum_wave
transmission_wave: 1D numpy array (Angstrom)
transmission_value: 1D numpy array , dimensionless, same size as transmission_wave
Returns:
integrated flux (unit= A x (input density unit)) , scalar
"""
# same size
assert(spectrum_wave.size==spectrum_flux.size)
assert(transmission_wave.size==transmission_value.size)
# sort arrays, just in case
ii=np.argsort(spectrum_wave)
jj=np.argsort(transmission_wave)
# tranmission contained in spectrum
assert(spectrum_wave[ii[0]]<=transmission_wave[jj[0]])
assert(spectrum_wave[ii[-1]]>=transmission_wave[jj[-1]])
kk=(spectrum_wave>=transmission_wave[jj[0]])&(spectrum_wave<=transmission_wave[jj[-1]])
# wavelength grid combining both grids in transmission_wave region
wave=np.unique(np.hstack([spectrum_wave[kk],transmission_wave]))
# value is product of interpolated values
val=np.interp(wave,spectrum_wave[ii],spectrum_flux[ii])*np.interp(wave,transmission_wave[jj],transmission_value[jj])
trapeze_area = (val[1:]+val[:-1])*(wave[1:]-wave[:-1])/2.
return np.sum(trapeze_area)
def ab_flux_in_ergs_s_cm2_A(wave) :
"""
Args:
wave: 1D numpy array (Angstrom)
Returns:
ab flux in units of ergs/s/cm2/A
"""
#import astropy.units
#default_wavelength_unit = astropy.units.Angstrom
#default_flux_unit = astropy.units.erg / astropy.units.cm**2 / astropy.units.s / default_wavelength_unit
#_ab_constant = 3631. * astropy.units.Jansky * astropy.constants.c).to(default_flux_unit * default_wavelength_unit**2)
_ab_constant = 0.10885464 # Angstrom erg / (cm2 s)
return _ab_constant / wave**2
def compute_ab_mag(spectrum_wave,spectrum_flux,transmission_wave,transmission_value) :
"""
Computes ab mag
Args:
spectrum_wave: 1D numpy array (Angstrom)
spectrum_flux: 1D numpy array (in units of 1e-17 ergs/s/cm2/A), same size as spectrum_wave
transmission_wave: 1D numpy array (Angstrom)
transmission_value: 1D numpy array , dimensionless, same size as transmission_wave
Returns:
mag (float scalar)
"""
numerator = 1e-17*compute_broadband_flux(spectrum_wave,spectrum_flux,transmission_wave,transmission_value)
# use same wavelength grid for denominator to limit interpolation biases
denominator = compute_broadband_flux(spectrum_wave,ab_flux_in_ergs_s_cm2_A(spectrum_wave),transmission_wave,transmission_value)
# may return NaN
return - 2.5 * np.log10(numerator/denominator)
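def _example_flat_ab_source_mag():
    """Worked example (a sketch, not part of the original API): a spectrum that
    equals the AB reference flux should come out at magnitude ~0 through any
    bandpass fully contained in its wavelength range. The tophat filter used
    here is an assumption for illustration."""
    wave = np.linspace(4000., 6000., 2001)          # Angstrom
    flux = 1e17 * ab_flux_in_ergs_s_cm2_A(wave)     # units of 1e-17 ergs/s/cm2/A
    filter_wave = np.linspace(4500., 5500., 101)    # tophat bandpass (Angstrom)
    filter_trans = np.ones(filter_wave.size)        # dimensionless transmission
    return compute_ab_mag(wave, flux, filter_wave, filter_trans)  # ~ 0.0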
| bsd-3-clause | 717,909,583,657,756,400 | 32 | 131 | 0.694904 | false | 3.330275 | false | false | false |
suvarnaraju/robotframework | src/robot/utils/escaping.py | 18 | 4166 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .robottypes import is_string
_CONTROL_WORDS_TO_BE_ESCAPED = ('ELSE', 'ELSE IF', 'AND')
_SEQUENCES_TO_BE_ESCAPED = ('\\', '${', '@{', '%{', '&{', '*{', '=')
def escape(item):
if not is_string(item):
return item
if item in _CONTROL_WORDS_TO_BE_ESCAPED:
return '\\' + item
for seq in _SEQUENCES_TO_BE_ESCAPED:
if seq in item:
item = item.replace(seq, '\\' + seq)
return item
def unescape(item):
if not (is_string(item) and '\\' in item):
return item
return Unescaper().unescape(item)
class Unescaper(object):
_escaped = re.compile(r'(\\+)([^\\]*)')
def unescape(self, string):
return ''.join(self._yield_unescaped(string))
def _yield_unescaped(self, string):
while '\\' in string:
finder = EscapeFinder(string)
yield finder.before + finder.backslashes
if finder.escaped and finder.text:
yield self._unescape(finder.text)
else:
yield finder.text
string = finder.after
yield string
def _unescape(self, text):
try:
escape = str(text[0])
except UnicodeError:
return text
try:
unescaper = getattr(self, '_unescaper_for_' + escape)
except AttributeError:
return text
else:
return unescaper(text[1:])
def _unescaper_for_n(self, text):
if text.startswith(' '):
text = text[1:]
return '\n' + text
def _unescaper_for_r(self, text):
return '\r' + text
def _unescaper_for_t(self, text):
return '\t' + text
def _unescaper_for_x(self, text):
return self._unescape_character(text, 2, 'x')
def _unescaper_for_u(self, text):
return self._unescape_character(text, 4, 'u')
def _unescaper_for_U(self, text):
return self._unescape_character(text, 8, 'U')
def _unescape_character(self, text, length, escape):
try:
char = self._get_character(text[:length], length)
except ValueError:
return escape + text
else:
return char + text[length:]
def _get_character(self, text, length):
if len(text) < length or not text.isalnum():
raise ValueError
ordinal = int(text, 16)
# No Unicode code points above 0x10FFFF
if ordinal > 0x10FFFF:
raise ValueError
# unichr only supports ordinals up to 0xFFFF with narrow Python builds
if ordinal > 0xFFFF:
return eval("u'\\U%08x'" % ordinal)
return unichr(ordinal)
class EscapeFinder(object):
_escaped = re.compile(r'(\\+)([^\\]*)')
def __init__(self, string):
res = self._escaped.search(string)
self.before = string[:res.start()]
escape_chars = len(res.group(1))
self.backslashes = '\\' * (escape_chars // 2)
self.escaped = bool(escape_chars % 2)
self.text = res.group(2)
self.after = string[res.end():]
def split_from_equals(string):
index = _get_split_index(string)
if index == -1:
return string, None
return string[:index], string[index+1:]
def _get_split_index(string):
index = 0
while '=' in string[index:]:
index += string[index:].index('=')
if _not_escaping(string[:index]):
return index
index += 1
return -1
def _not_escaping(name):
backslashes = len(name) - len(name.rstrip('\\'))
return backslashes % 2 == 0
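def _escaping_examples():
    # A small self-check sketch (not part of the original module) illustrating
    # the round-trip behaviour of the helpers above.
    assert escape('${var}') == '\\${var}'
    assert unescape('\\${var}') == '${var}'
    assert unescape('\\n') == '\n'
    assert split_from_equals('name=value') == ('name', 'value')
    assert split_from_equals('no equals here') == ('no equals here', None)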
| apache-2.0 | 4,818,477,688,823,138,000 | 28.546099 | 78 | 0.585934 | false | 3.763324 | false | false | false |
alexgorban/models | research/object_detection/builders/box_predictor_builder.py | 1 | 46974 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
import collections
import tensorflow as tf
from object_detection.predictors import convolutional_box_predictor
from object_detection.predictors import convolutional_keras_box_predictor
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.predictors import mask_rcnn_keras_box_predictor
from object_detection.predictors import rfcn_box_predictor
from object_detection.predictors import rfcn_keras_box_predictor
from object_detection.predictors.heads import box_head
from object_detection.predictors.heads import class_head
from object_detection.predictors.heads import keras_box_head
from object_detection.predictors.heads import keras_class_head
from object_detection.predictors.heads import keras_mask_head
from object_detection.predictors.heads import mask_head
from object_detection.protos import box_predictor_pb2
def build_convolutional_box_predictor(is_training,
num_classes,
conv_hyperparams_fn,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
apply_sigmoid_to_scores=False,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None):
"""Builds the ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
    use_dropout: Option to use dropout or not. Note that in this builder
      dropout is only applied in the class prediction head, prior to the
      class predictions.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
apply_sigmoid_to_scores: If True, apply the sigmoid on the output
class_predictions.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: Constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
Returns:
A ConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
apply_sigmoid_to_scores=apply_sigmoid_to_scores,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise)
other_heads = {}
return convolutional_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth)
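# Hedged usage sketch (not part of this module): a typical call for an
# SSD-style model.  `my_conv_hyperparams_fn` stands in for an arg_scope
# generator (for example one produced by hyperparams_builder from a
# Hyperparams proto); the numeric values are illustrative, not prescribed.
#
#   box_predictor = build_convolutional_box_predictor(
#       is_training=True,
#       num_classes=90,
#       conv_hyperparams_fn=my_conv_hyperparams_fn,
#       min_depth=0,
#       max_depth=0,
#       num_layers_before_predictor=0,
#       use_dropout=True,
#       dropout_keep_prob=0.8,
#       kernel_size=3,
#       box_code_size=4)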
def build_convolutional_keras_box_predictor(is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
num_predictions_per_location_list,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None,
name='BoxPredictor'):
"""Builds the Keras ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
    use_dropout: Option to use dropout or not. Note that in this builder
      dropout is only applied in the class prediction heads, prior to the
      class predictions.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
Returns:
A Keras ConvolutionalBoxPredictor class.
"""
box_prediction_heads = []
class_prediction_heads = []
other_heads = {}
for stack_index, num_predictions_per_location in enumerate(
num_predictions_per_location_list):
box_prediction_heads.append(
keras_box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
num_predictions_per_location=num_predictions_per_location,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range,
name='ConvolutionalBoxHead_%d' % stack_index))
class_prediction_heads.append(
keras_class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
num_predictions_per_location=num_predictions_per_location,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise,
name='ConvolutionalClassHead_%d' % stack_index))
return convolutional_keras_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_heads=box_prediction_heads,
class_prediction_heads=class_prediction_heads,
other_heads=other_heads,
conv_hyperparams=conv_hyperparams,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
name=name)
def build_weight_shared_convolutional_box_predictor(
is_training,
num_classes,
conv_hyperparams_fn,
depth,
num_layers_before_predictor,
box_code_size,
kernel_size=3,
add_background_class=True,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
share_prediction_tower=False,
apply_batch_norm=True,
use_depthwise=False,
score_converter_fn=tf.identity,
box_encodings_clip_range=None,
keyword_args=None):
"""Builds and returns a WeightSharedConvolutionalBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
score_converter_fn: Callable score converter to perform elementwise op on
class scores.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
keyword_args: A dictionary with additional args.
Returns:
A WeightSharedConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = (
class_head.WeightSharedConvolutionalClassHead(
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
kernel_size=kernel_size,
class_prediction_bias_init=class_prediction_bias_init,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
use_depthwise=use_depthwise,
score_converter_fn=score_converter_fn))
other_heads = {}
return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=depth,
num_layers_before_predictor=num_layers_before_predictor,
kernel_size=kernel_size,
apply_batch_norm=apply_batch_norm,
share_prediction_tower=share_prediction_tower,
use_depthwise=use_depthwise)
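# Hedged usage sketch (not part of this module): the weight-shared variant
# shares one prediction tower across feature maps, as in RetinaNet-style
# architectures.  `my_conv_hyperparams_fn` is a placeholder, and the depth,
# layer count and bias init below are illustrative, not prescribed.
#
#   box_predictor = build_weight_shared_convolutional_box_predictor(
#       is_training=True,
#       num_classes=90,
#       conv_hyperparams_fn=my_conv_hyperparams_fn,
#       depth=256,
#       num_layers_before_predictor=4,
#       box_code_size=4,
#       class_prediction_bias_init=-4.6,
#       share_prediction_tower=True)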
def build_weight_shared_convolutional_keras_box_predictor(
is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
num_predictions_per_location_list,
depth,
num_layers_before_predictor,
box_code_size,
kernel_size=3,
add_background_class=True,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
share_prediction_tower=False,
apply_batch_norm=True,
use_depthwise=False,
score_converter_fn=tf.identity,
box_encodings_clip_range=None,
name='WeightSharedConvolutionalBoxPredictor',
keyword_args=None):
"""Builds the Keras WeightSharedConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
score_converter_fn: Callable score converter to perform elementwise op on
class scores.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
keyword_args: A dictionary with additional args.
Returns:
A Keras WeightSharedConvolutionalBoxPredictor class.
"""
if len(set(num_predictions_per_location_list)) > 1:
    raise ValueError('num predictions per location must be the same for all '
'feature maps, found: {}'.format(
num_predictions_per_location_list))
num_predictions_per_location = num_predictions_per_location_list[0]
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range,
name='WeightSharedConvolutionalBoxHead')
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise,
score_converter_fn=score_converter_fn,
name='WeightSharedConvolutionalClassHead')
other_heads = {}
return (
convolutional_keras_box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams=conv_hyperparams,
depth=depth,
num_layers_before_predictor=num_layers_before_predictor,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
kernel_size=kernel_size,
apply_batch_norm=apply_batch_norm,
share_prediction_tower=share_prediction_tower,
use_depthwise=use_depthwise,
name=name))
def build_mask_rcnn_keras_box_predictor(is_training,
num_classes,
fc_hyperparams,
freeze_batchnorm,
use_dropout,
dropout_keep_prob,
box_code_size,
add_background_class=True,
share_box_across_classes=False,
predict_instance_masks=False,
conv_hyperparams=None,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False,
convolve_then_upsample_masks=False):
"""Builds and returns a MaskRCNNKerasBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for fully connected dense ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
    use_dropout: Option to use dropout or not. Note that a single dropout
      op is applied here prior to both box and class predictions, which stands
      in contrast to the ConvolutionalBoxPredictor above.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
share_box_across_classes: Whether to share boxes across classes rather
than use a different box for each class.
predict_instance_masks: If True, will add a third stage mask prediction
to the returned class.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_num_conv_layers: Number of convolution layers applied to
the image_features in mask prediction branch.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch. If set
to 0, the depth of the convolution layers will be automatically chosen
based on the number of object classes and the number of channels in the
image features.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
convolve_then_upsample_masks: Whether to apply convolutions on mask
features before upsampling using nearest neighbor resizing. Otherwise,
mask features are resized to [`mask_height`, `mask_width`] using
bilinear resizing before applying convolutions.
Returns:
A MaskRCNNKerasBoxPredictor class.
"""
box_prediction_head = keras_box_head.MaskRCNNBoxHead(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
box_code_size=box_code_size,
share_box_across_classes=share_box_across_classes)
class_prediction_head = keras_class_head.MaskRCNNClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob)
third_stage_heads = {}
if predict_instance_masks:
third_stage_heads[
mask_rcnn_box_predictor.
MASK_PREDICTIONS] = keras_mask_head.MaskRCNNMaskHead(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
mask_height=mask_height,
mask_width=mask_width,
mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
mask_prediction_conv_depth=mask_prediction_conv_depth,
masks_are_class_agnostic=masks_are_class_agnostic,
convolve_then_upsample=convolve_then_upsample_masks)
return mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor(
is_training=is_training,
num_classes=num_classes,
freeze_batchnorm=freeze_batchnorm,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
third_stage_heads=third_stage_heads)
def build_mask_rcnn_box_predictor(is_training,
num_classes,
fc_hyperparams_fn,
use_dropout,
dropout_keep_prob,
box_code_size,
add_background_class=True,
share_box_across_classes=False,
predict_instance_masks=False,
conv_hyperparams_fn=None,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False,
convolve_then_upsample_masks=False):
"""Builds and returns a MaskRCNNBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
fc_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for fully connected ops.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
share_box_across_classes: Whether to share boxes across classes rather
than use a different box for each class.
predict_instance_masks: If True, will add a third stage mask prediction
to the returned class.
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_num_conv_layers: Number of convolution layers applied to
the image_features in mask prediction branch.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch. If set
to 0, the depth of the convolution layers will be automatically chosen
based on the number of object classes and the number of channels in the
image features.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
convolve_then_upsample_masks: Whether to apply convolutions on mask
features before upsampling using nearest neighbor resizing. Otherwise,
mask features are resized to [`mask_height`, `mask_width`] using
bilinear resizing before applying convolutions.
Returns:
A MaskRCNNBoxPredictor class.
"""
box_prediction_head = box_head.MaskRCNNBoxHead(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
box_code_size=box_code_size,
share_box_across_classes=share_box_across_classes)
class_prediction_head = class_head.MaskRCNNClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob)
third_stage_heads = {}
if predict_instance_masks:
third_stage_heads[
mask_rcnn_box_predictor.
MASK_PREDICTIONS] = mask_head.MaskRCNNMaskHead(
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
mask_height=mask_height,
mask_width=mask_width,
mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
mask_prediction_conv_depth=mask_prediction_conv_depth,
masks_are_class_agnostic=masks_are_class_agnostic,
convolve_then_upsample=convolve_then_upsample_masks)
return mask_rcnn_box_predictor.MaskRCNNBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
third_stage_heads=third_stage_heads)
def build_score_converter(score_converter_config, is_training):
"""Builds score converter based on the config.
Builds one of [tf.identity, tf.sigmoid] score converters based on the config
and whether the BoxPredictor is for training or inference.
Args:
score_converter_config:
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter.
is_training: Indicates whether the BoxPredictor is in training mode.
Returns:
Callable score converter op.
Raises:
ValueError: On unknown score converter.
"""
if score_converter_config == (
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY):
return tf.identity
if score_converter_config == (
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID):
return tf.identity if is_training else tf.sigmoid
raise ValueError('Unknown score converter.')
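def _score_converter_example():
  """Added illustration (not part of the original API): SIGMOID converts
  scores only at inference time; during training the raw logits are kept so
  the loss can be computed from them."""
  train_fn = build_score_converter(
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID,
      is_training=True)   # -> tf.identity
  eval_fn = build_score_converter(
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID,
      is_training=False)  # -> tf.sigmoid
  return train_fn, eval_fn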
BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange',
['min', 'max'])
def build(argscope_fn, box_predictor_config, is_training, num_classes,
add_background_class=True):
"""Builds box predictor based on the configuration.
Builds box predictor based on the configuration. See box_predictor.proto for
configurable options. Also, see box_predictor.py for more details.
Args:
argscope_fn: A function that takes the following inputs:
* hyperparams_pb2.Hyperparams proto
* a boolean indicating if the model is in training mode.
and returns a tf slim argscope for Conv and FC hyperparameters.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
add_background_class: Whether to add an implicit background class.
Returns:
box_predictor: box_predictor.BoxPredictor object.
Raises:
ValueError: On unknown box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
config_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
return build_convolutional_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams_fn=conv_hyperparams_fn,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
min_depth=config_box_predictor.min_depth,
max_depth=config_box_predictor.max_depth,
apply_sigmoid_to_scores=config_box_predictor.apply_sigmoid_to_scores,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_depthwise=config_box_predictor.use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
config_box_predictor = (
box_predictor_config.weight_shared_convolutional_box_predictor)
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
'batch_norm')
# During training phase, logits are used to compute the loss. Only apply
# sigmoid at inference to make the inference graph TPU friendly.
score_converter_fn = build_score_converter(
config_box_predictor.score_converter, is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
keyword_args = None
return build_weight_shared_convolutional_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=config_box_predictor.depth,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
share_prediction_tower=config_box_predictor.share_prediction_tower,
apply_batch_norm=apply_batch_norm,
use_depthwise=config_box_predictor.use_depthwise,
score_converter_fn=score_converter_fn,
box_encodings_clip_range=box_encodings_clip_range,
keyword_args=keyword_args)
if box_predictor_oneof == 'mask_rcnn_box_predictor':
config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams_fn = argscope_fn(config_box_predictor.fc_hyperparams,
is_training)
conv_hyperparams_fn = None
if config_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams_fn = argscope_fn(
config_box_predictor.conv_hyperparams, is_training)
return build_mask_rcnn_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
share_box_across_classes=(
config_box_predictor.share_box_across_classes),
predict_instance_masks=config_box_predictor.predict_instance_masks,
conv_hyperparams_fn=conv_hyperparams_fn,
mask_height=config_box_predictor.mask_height,
mask_width=config_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
config_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
config_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
config_box_predictor.masks_are_class_agnostic),
convolve_then_upsample_masks=(
config_box_predictor.convolve_then_upsample_masks))
if box_predictor_oneof == 'rfcn_box_predictor':
config_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = rfcn_box_predictor.RfcnBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
crop_size=[config_box_predictor.crop_height,
config_box_predictor.crop_width],
num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
config_box_predictor.num_spatial_bins_width],
depth=config_box_predictor.depth,
box_code_size=config_box_predictor.box_code_size)
return box_predictor_object
raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
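# Illustrative usage sketch (added, not part of the original API). The
# `argscope_fn` is any callable mapping (hyperparams_pb2.Hyperparams proto,
# is_training) to a tf-slim arg_scope; `my_argscope_fn` below is a placeholder
# for whichever hyperparams builder the caller uses.
#
#   config = box_predictor_pb2.BoxPredictor()
#   # ... populate one of the oneof fields, e.g. convolutional_box_predictor ...
#   predictor = build(my_argscope_fn, config, is_training=True, num_classes=90)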
def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update,
num_predictions_per_location_list, box_predictor_config,
is_training, num_classes, add_background_class=True):
"""Builds a Keras-based box predictor based on the configuration.
Builds Keras-based box predictor based on the configuration.
See box_predictor.proto for configurable options. Also, see box_predictor.py
for more details.
Args:
hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams
proto and returns a `hyperparams_builder.KerasLayerHyperparams`
for Conv or FC hyperparameters.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
add_background_class: Whether to add an implicit background class.
Returns:
box_predictor: box_predictor.KerasBoxPredictor object.
Raises:
ValueError: On unknown box predictor, or one with no Keras box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
config_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
return build_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
min_depth=config_box_predictor.min_depth,
max_depth=config_box_predictor.max_depth,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_depthwise=config_box_predictor.use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
config_box_predictor = (
box_predictor_config.weight_shared_convolutional_box_predictor)
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
'batch_norm')
# During training phase, logits are used to compute the loss. Only apply
# sigmoid at inference to make the inference graph TPU friendly. This is
# required because during TPU inference, model.postprocess is not called.
score_converter_fn = build_score_converter(
config_box_predictor.score_converter, is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
keyword_args = None
return build_weight_shared_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
depth=config_box_predictor.depth,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
add_background_class=add_background_class,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
share_prediction_tower=config_box_predictor.share_prediction_tower,
apply_batch_norm=apply_batch_norm,
use_depthwise=config_box_predictor.use_depthwise,
score_converter_fn=score_converter_fn,
box_encodings_clip_range=box_encodings_clip_range,
keyword_args=keyword_args)
if box_predictor_oneof == 'mask_rcnn_box_predictor':
config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams = hyperparams_fn(config_box_predictor.fc_hyperparams)
conv_hyperparams = None
if config_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
return build_mask_rcnn_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
share_box_across_classes=(
config_box_predictor.share_box_across_classes),
predict_instance_masks=config_box_predictor.predict_instance_masks,
conv_hyperparams=conv_hyperparams,
mask_height=config_box_predictor.mask_height,
mask_width=config_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
config_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
config_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
config_box_predictor.masks_are_class_agnostic),
convolve_then_upsample_masks=(
config_box_predictor.convolve_then_upsample_masks))
if box_predictor_oneof == 'rfcn_box_predictor':
config_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
box_predictor_object = rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
crop_size=[config_box_predictor.crop_height,
config_box_predictor.crop_width],
num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
config_box_predictor.num_spatial_bins_width],
depth=config_box_predictor.depth,
box_code_size=config_box_predictor.box_code_size)
return box_predictor_object
raise ValueError(
'Unknown box predictor for Keras: {}'.format(box_predictor_oneof))
| apache-2.0 | 4,100,371,919,349,700,000 | 47.178462 | 80 | 0.679418 | false | 4.023469 | true | false | false |
zbanks/musicazoo | musicazoo/lib/watch_dl.py | 1 | 3170 | # -*- coding: utf-8 -*-
import urllib
import re
import urllib2
import sys
import HTMLParser
from youtube_dl.extractor.common import InfoExtractor
html_parser = HTMLParser.HTMLParser()
class WatchCartoonOnlineIE(InfoExtractor):
#IE_NAME = u'WatchCartoonOnline'
_VALID_URL = r'(?:http://)?(?:www\.)?watchcartoononline\.com/([^/]+)'
def _real_extract(self,url):
o=urllib2.build_opener(urllib2.HTTPHandler).open
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
webpage = o('http://www.watchcartoononline.com/{0}'.format(video_id)).read()
title_escaped = re.search(r'<h1.*?>(.+?)</h1>',webpage).group(1)
title = html_parser.unescape(title_escaped)
video_url = re.search(r'<iframe id="(.+?)0" (.+?)>', webpage).group()
video_url = re.search('src="(.+?)"', video_url).group(1).replace(' ','%20')
params = urllib.urlencode({'fuck_you':'','confirm':'Click Here to Watch Free!!'})
request = urllib2.Request(video_url,params)
video_webpage = o(request).read()
final_url = re.findall(r'file: "(.+?)"', video_webpage)
redirect_url=urllib.unquote(final_url[-1]).replace(' ','%20')
flv_url = o(redirect_url).geturl()
return {'url':flv_url, 'title':title, 'id': video_id}
def downloader(fileurl,file_name):
u = urllib2.urlopen(fileurl)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "[watchcartoononline-dl] Downloading %s (%s bytes)" %(file_name, file_size)
file_size_dl = 0
block_size = 8192
#Download loop
while True:
buffer = u.read(block_size)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%s [%3.2f%%]" % (convertSize(file_size_dl), file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
#print status
sys.stdout.write("\r %s" % status)
sys.stdout.flush()
#Download done. Close file stream
f.close()
def convertSize(n, format='%(value).1f %(symbol)s', symbols='customary'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
"""
SYMBOLS = {
        'customary' : ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'),
'customary_ext' : ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
'zetta', 'iotta'),
'iec' : ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
'iec_ext' : ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
'zebi', 'yobi'),
}
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = SYMBOLS[symbols]
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i+1)*10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
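def _convertSize_examples():
    # Added illustration, not part of the original script: expected renderings
    # with the default "customary" symbol set.
    assert convertSize(1) == '1.0 B'
    assert convertSize(10240) == '10.0 K'
    assert convertSize(3 * 1024 ** 3) == '3.0 G'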
| mit | -4,147,889,361,817,719,000 | 34.222222 | 95 | 0.555836 | false | 3.176353 | false | false | false |
desihub/fiberassign | py/fiberassign/test/fiberassign_test_suite.py | 1 | 1121 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
fiberassign.test.fiberassign_test_suite
===================================================
Used to initialize the unit test framework via ``python setup.py test``.
"""
from __future__ import absolute_import, division, print_function
import sys
import unittest
def fiberassign_test_suite():
"""Returns unittest.TestSuite of desiutil tests.
This is factored out separately from runtests() so that it can be used by
``python setup.py test``.
"""
from os.path import dirname
py_dir = dirname(dirname(__file__))
return unittest.defaultTestLoader.discover(py_dir, top_level_dir=dirname(py_dir))
def runtests():
"""Run all tests in fiberassign.test.test_*."""
    # Load all TestCase classes from fiberassign/test/test_*.py
tests = fiberassign_test_suite()
# Run them and force exit with a non-zero process return value if they fail
ret = unittest.TextTestRunner(verbosity=2).run(tests)
if not ret.wasSuccessful():
sys.exit(ret)
if __name__ == "__main__":
runtests()
| bsd-3-clause | 5,689,746,902,278,580,000 | 27.74359 | 85 | 0.657449 | false | 3.878893 | true | false | false |
mayfield/snowflake-connector-python | network.py | 1 | 44805 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2017 Snowflake Computing Inc. All right reserved.
#
import collections
import contextlib
import copy
import gzip
import itertools
import json
import logging
import platform
import sys
import time
import uuid
from io import StringIO, BytesIO
from threading import Thread
import OpenSSL
from botocore.vendored import requests
from botocore.vendored.requests.adapters import HTTPAdapter
from botocore.vendored.requests.auth import AuthBase
from botocore.vendored.requests.exceptions import (ConnectionError, SSLError)
from botocore.vendored.requests.packages.urllib3.exceptions import (
ProtocolError)
from . import ssl_wrap_socket
from .compat import (
BAD_REQUEST, SERVICE_UNAVAILABLE, GATEWAY_TIMEOUT,
FORBIDDEN, BAD_GATEWAY,
UNAUTHORIZED, INTERNAL_SERVER_ERROR, OK, BadStatusLine,
urlsplit, unescape)
from .compat import (Queue, EmptyQueue)
from .compat import (TO_UNICODE, urlencode)
from .compat import proxy_bypass
from .errorcode import (ER_FAILED_TO_CONNECT_TO_DB, ER_CONNECTION_IS_CLOSED,
ER_FAILED_TO_REQUEST, ER_FAILED_TO_RENEW_SESSION,
ER_FAILED_TO_SERVER, ER_IDP_CONNECTION_ERROR,
ER_INCORRECT_DESTINATION)
from .errors import (Error, OperationalError, DatabaseError, ProgrammingError,
GatewayTimeoutError, ServiceUnavailableError,
InterfaceError, InternalServerError, ForbiddenError,
BadGatewayError, BadRequest)
from .gzip_decoder import decompress_raw_data
from .sqlstate import (SQLSTATE_CONNECTION_NOT_EXISTS,
SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
SQLSTATE_CONNECTION_REJECTED)
from .ssl_wrap_socket import set_proxies
from .util_text import split_rows_from_stream
from .version import VERSION
logger = logging.getLogger(__name__)
"""
Monkey patch for PyOpenSSL Socket wrapper
"""
ssl_wrap_socket.inject_into_urllib3()
import errno
REQUESTS_RETRY = 5 # requests retry
QUERY_IN_PROGRESS_CODE = u'333333' # GS code: the query is in progress
QUERY_IN_PROGRESS_ASYNC_CODE = u'333334' # GS code: the query is detached
SESSION_EXPIRED_GS_CODE = u'390112' # GS code: session expired. need to renew
DEFAULT_CONNECT_TIMEOUT = 1 * 60 # 60 seconds
DEFAULT_REQUEST_TIMEOUT = 2 * 60 # 120 seconds
CONTENT_TYPE_APPLICATION_JSON = u'application/json'
ACCEPT_TYPE_APPLICATION_SNOWFLAKE = u'application/snowflake'
REQUEST_TYPE_RENEW = u'RENEW'
REQUEST_TYPE_CLONE = u'CLONE'
REQUEST_TYPE_ISSUE = u'ISSUE'
HEADER_AUTHORIZATION_KEY = u"Authorization"
HEADER_SNOWFLAKE_TOKEN = u'Snowflake Token="{token}"'
SNOWFLAKE_CONNECTOR_VERSION = u'.'.join(TO_UNICODE(v) for v in VERSION[0:3])
PYTHON_VERSION = u'.'.join(TO_UNICODE(v) for v in sys.version_info[:3])
PLATFORM = platform.platform()
IMPLEMENTATION = platform.python_implementation()
COMPILER = platform.python_compiler()
CLIENT_NAME = u"PythonConnector"
CLIENT_VERSION = u'.'.join([TO_UNICODE(v) for v in VERSION[:3]])
PYTHON_CONNECTOR_USER_AGENT = \
u'{name}/{version}/{python_version}/{platform}'.format(
name=CLIENT_NAME,
version=SNOWFLAKE_CONNECTOR_VERSION,
python_version=PYTHON_VERSION,
platform=PLATFORM)
DEFAULT_AUTHENTICATOR = u'SNOWFLAKE' # default authenticator name
NO_TOKEN = u'no-token'
STATUS_TO_EXCEPTION = {
INTERNAL_SERVER_ERROR: InternalServerError,
FORBIDDEN: ForbiddenError,
SERVICE_UNAVAILABLE: ServiceUnavailableError,
GATEWAY_TIMEOUT: GatewayTimeoutError,
BAD_REQUEST: BadRequest,
BAD_GATEWAY: BadGatewayError,
}
def _is_prefix_equal(url1, url2):
"""
Checks if URL prefixes are identical. The scheme, hostname and port number
are compared. If the port number is not specified and the scheme is https,
the port number is assumed to be 443.
"""
parsed_url1 = urlsplit(url1)
parsed_url2 = urlsplit(url2)
port1 = parsed_url1.port
if not port1 and parsed_url1.scheme == 'https':
port1 = '443'
    port2 = parsed_url2.port
    if not port2 and parsed_url2.scheme == 'https':
        port2 = '443'
    return parsed_url1.hostname == parsed_url2.hostname and \
        TO_UNICODE(port1) == TO_UNICODE(port2) and \
        parsed_url1.scheme == parsed_url2.scheme
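def _is_prefix_equal_example():
    """Added illustration, not part of the original module: the default https
    port makes an explicit ':443' equivalent to no port at all, while a
    different hostname fails the check. The hostnames are made up."""
    assert _is_prefix_equal('https://acme.example.com/app/token',
                            'https://acme.example.com:443/sso')
    assert not _is_prefix_equal('https://acme.example.com/app/token',
                                'https://other.example.com/sso')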
def _get_post_back_url_from_html(html):
"""
Gets the post back URL.
Since the HTML is not well formed, minidom cannot be used to convert to
DOM. The first discovered form is assumed to be the form to post back
    and the URL is taken from its action attribute.
"""
logger.debug(html)
idx = html.find('<form')
start_idx = html.find('action="', idx)
end_idx = html.find('"', start_idx + 8)
return unescape(html[start_idx + 8:end_idx])
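def _get_post_back_url_example():
    """Added illustration, not part of the original module: the first form's
    action attribute is extracted and HTML-unescaped. The URL is made up."""
    html = ('<html><body>'
            '<form method="post" action="https://acct.example.com/fed/login">'
            '</form></body></html>')
    return _get_post_back_url_from_html(html)  # -> 'https://acct.example.com/fed/login'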
class RequestRetry(Exception):
pass
class SnowflakeAuth(AuthBase):
"""
Attaches HTTP Authorization header for Snowflake
"""
def __init__(self, token):
# setup any auth-related data here
self.token = token
def __call__(self, r):
# modify and return the request
if HEADER_AUTHORIZATION_KEY in r.headers:
del r.headers[HEADER_AUTHORIZATION_KEY]
if self.token != NO_TOKEN:
r.headers[
HEADER_AUTHORIZATION_KEY] = HEADER_SNOWFLAKE_TOKEN.format(
token=self.token)
return r
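# Example of the header SnowflakeAuth attaches (added for clarity): a request
# issued with SnowflakeAuth('<session token>') carries
#   Authorization: Snowflake Token="<session token>"
# while SnowflakeAuth(NO_TOKEN) only strips any stale Authorization header.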
class SnowflakeRestful(object):
"""
Snowflake Restful class
"""
def __init__(self, host=u'127.0.0.1', port=8080,
proxy_host=None,
proxy_port=None,
proxy_user=None,
proxy_password=None,
protocol=u'http',
connect_timeout=DEFAULT_CONNECT_TIMEOUT,
request_timeout=DEFAULT_REQUEST_TIMEOUT,
injectClientPause=0,
connection=None):
self._host = host
self._port = port
self._proxy_host = proxy_host
self._proxy_port = proxy_port
self._proxy_user = proxy_user
self._proxy_password = proxy_password
self._protocol = protocol
self._connect_timeout = connect_timeout or DEFAULT_CONNECT_TIMEOUT
self._request_timeout = request_timeout or DEFAULT_REQUEST_TIMEOUT
self._injectClientPause = injectClientPause
self._connection = connection
self._idle_sessions = collections.deque()
self._active_sessions = set()
self._request_count = itertools.count()
# insecure mode (disabled by default)
ssl_wrap_socket.FEATURE_INSECURE_MODE = \
self._connection and self._connection._insecure_mode
# cache file name (enabled by default)
ssl_wrap_socket.FEATURE_OCSP_RESPONSE_CACHE_FILE_NAME = \
self._connection and self._connection._ocsp_response_cache_filename
#
ssl_wrap_socket.PROXY_HOST = self._proxy_host
ssl_wrap_socket.PROXY_PORT = self._proxy_port
ssl_wrap_socket.PROXY_USER = self._proxy_user
ssl_wrap_socket.PROXY_PASSWORD = self._proxy_password
# This is to address the issue where requests hangs
_ = 'dummy'.encode('idna').decode('utf-8')
proxy_bypass('www.snowflake.net:443')
@property
def token(self):
return self._token if hasattr(self, u'_token') else None
@property
def master_token(self):
return self._master_token if hasattr(self, u'_master_token') else None
def close(self):
if hasattr(self, u'_token'):
del self._token
if hasattr(self, u'_master_token'):
del self._master_token
sessions = list(self._active_sessions)
if sessions:
logger.warn("Closing %s active sessions", len(sessions))
sessions.extend(self._idle_sessions)
self._active_sessions.clear()
self._idle_sessions.clear()
for s in sessions:
try:
s.close()
except Exception as e:
logger.warn("Session cleanup failed: %s", e)
def authenticate(self, account, user, password, master_token=None,
token=None, database=None, schema=None,
warehouse=None, role=None, passcode=None,
passcode_in_password=False, saml_response=None,
mfa_callback=None, password_callback=None,
session_parameters=None):
logger.debug(u'authenticate')
if token and master_token:
self._token = token
self._master_token = token
logger.debug(u'token is given. no authentication was done')
return
application = self._connection.application if \
self._connection else CLIENT_NAME
internal_application_name = \
self._connection._internal_application_name if \
self._connection else CLIENT_NAME
internal_application_version = \
self._connection._internal_application_version if \
self._connection else CLIENT_VERSION
request_id = TO_UNICODE(uuid.uuid4())
headers = {
u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
u"accept": ACCEPT_TYPE_APPLICATION_SNOWFLAKE,
u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
}
url = u"/session/v1/login-request"
body_template = {
u'data': {
u"CLIENT_APP_ID": internal_application_name,
u"CLIENT_APP_VERSION": internal_application_version,
u"SVN_REVISION": VERSION[3],
u"ACCOUNT_NAME": account,
u"CLIENT_ENVIRONMENT": {
u"APPLICATION": application,
u"OS_VERSION": PLATFORM,
u"PYTHON_VERSION": PYTHON_VERSION,
u"PYTHON_RUNTIME": IMPLEMENTATION,
u"PYTHON_COMPILER": COMPILER,
}
},
}
body = copy.deepcopy(body_template)
logger.debug(u'saml: %s', saml_response is not None)
if saml_response:
body[u'data'][u'RAW_SAML_RESPONSE'] = saml_response
else:
body[u'data'][u"LOGIN_NAME"] = user
body[u'data'][u"PASSWORD"] = password
logger.debug(
u'account=%s, user=%s, database=%s, schema=%s, '
u'warehouse=%s, role=%s, request_id=%s',
account,
user,
database,
schema,
warehouse,
role,
request_id,
)
url_parameters = {}
url_parameters[u'request_id'] = request_id
if database is not None:
url_parameters[u'databaseName'] = database
if schema is not None:
url_parameters[u'schemaName'] = schema
if warehouse is not None:
url_parameters[u'warehouse'] = warehouse
if role is not None:
url_parameters[u'roleName'] = role
if len(url_parameters) > 0:
url = url + u'?' + urlencode(url_parameters)
# first auth request
if passcode_in_password:
body[u'data'][u'EXT_AUTHN_DUO_METHOD'] = u'passcode'
elif passcode:
body[u'data'][u'EXT_AUTHN_DUO_METHOD'] = u'passcode'
body[u'data'][u'PASSCODE'] = passcode
if session_parameters:
body[u'data'][u'SESSION_PARAMETERS'] = session_parameters
logger.debug(
"body['data']: %s",
{k: v for (k, v) in body[u'data'].items() if k != u'PASSWORD'})
try:
ret = self._post_request(
url, headers, json.dumps(body),
timeout=self._connection._login_timeout)
except ForbiddenError as err:
# HTTP 403
raise err.__class__(
msg=(u"Failed to connect to DB. "
u"Verify the account name is correct: {host}:{port}, "
u"proxies={proxy_host}:{proxy_port}, "
u"proxy_user={proxy_user}. {message}").format(
host=self._host,
port=self._port,
proxy_host=self._proxy_host,
proxy_port=self._proxy_port,
proxy_user=self._proxy_user,
message=TO_UNICODE(err)
),
errno=ER_FAILED_TO_CONNECT_TO_DB,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED)
except (ServiceUnavailableError, BadGatewayError) as err:
# HTTP 502/504
raise err.__class__(
msg=(u"Failed to connect to DB. "
u"Service is unavailable: {host}:{port}, "
u"proxies={proxy_host}:{proxy_port}, "
u"proxy_user={proxy_user}. {message}").format(
host=self._host,
port=self._port,
proxy_host=self._proxy_host,
proxy_port=self._proxy_port,
proxy_user=self._proxy_user,
message=TO_UNICODE(err)
),
errno=ER_FAILED_TO_CONNECT_TO_DB,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED)
# this means we are waiting for MFA authentication
if ret[u'data'].get(u'nextAction') and ret[u'data'][
u'nextAction'] == u'EXT_AUTHN_DUO_ALL':
body[u'inFlightCtx'] = ret[u'data'][u'inFlightCtx']
body[u'data'][u'EXT_AUTHN_DUO_METHOD'] = u'push'
self.ret = None
def post_request_wrapper(self, url, headers, body):
# get the MFA response
self.ret = self._post_request(
url, headers, body,
timeout=self._connection._login_timeout)
# send new request to wait until MFA is approved
t = Thread(target=post_request_wrapper,
args=[self, url, headers, json.dumps(body)])
t.daemon = True
t.start()
if callable(mfa_callback):
c = mfa_callback()
while not self.ret:
next(c)
else:
t.join(timeout=120)
ret = self.ret
if ret[u'data'].get(u'nextAction') and ret[u'data'][
u'nextAction'] == u'EXT_AUTHN_SUCCESS':
body = copy.deepcopy(body_template)
body[u'inFlightCtx'] = ret[u'data'][u'inFlightCtx']
# final request to get tokens
ret = self._post_request(
url, headers, json.dumps(body),
timeout=self._connection._login_timeout)
elif ret[u'data'].get(u'nextAction') and ret[u'data'][
u'nextAction'] == u'PWD_CHANGE':
if callable(password_callback):
body = copy.deepcopy(body_template)
body[u'inFlightCtx'] = ret[u'data'][u'inFlightCtx']
body[u'data'][u"LOGIN_NAME"] = user
body[u'data'][u"PASSWORD"] = password
body[u'data'][u'CHOSEN_NEW_PASSWORD'] = password_callback()
# New Password input
ret = self._post_request(
url, headers, json.dumps(body),
timeout=self._connection._login_timeout)
logger.debug(u'completed authentication')
if not ret[u'success']:
Error.errorhandler_wrapper(
self._connection, None, DatabaseError,
{
u'msg': (u"failed to connect to DB: {host}:{port}, "
u"proxies={proxy_host}:{proxy_port}, "
u"proxy_user={proxy_user}, "
u"{message}").format(
host=self._host,
port=self._port,
proxy_host=self._proxy_host,
proxy_port=self._proxy_port,
proxy_user=self._proxy_user,
message=ret[u'message'],
),
u'errno': ER_FAILED_TO_CONNECT_TO_DB,
u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
})
else:
self._token = ret[u'data'][u'token']
self._master_token = ret[u'data'][u'masterToken']
logger.debug(u'token = %s', self._token)
logger.debug(u'master_token = %s', self._master_token)
if u'sessionId' in ret[u'data']:
self._connection._session_id = ret[u'data'][u'sessionId']
if u'sessionInfo' in ret[u'data']:
session_info = ret[u'data'][u'sessionInfo']
if u'databaseName' in session_info:
self._connection._database = session_info[u'databaseName']
if u'schemaName' in session_info:
self._connection.schema = session_info[u'schemaName']
if u'roleName' in session_info:
self._connection._role = session_info[u'roleName']
if u'warehouseName' in session_info:
self._connection._warehouse = session_info[u'warehouseName']
def request(self, url, body=None, method=u'post', client=u'sfsql',
_no_results=False):
if body is None:
body = {}
if not hasattr(self, u'_master_token'):
Error.errorhandler_wrapper(
self._connection, None, DatabaseError,
{
u'msg': u"Connection is closed",
u'errno': ER_CONNECTION_IS_CLOSED,
u'sqlstate': SQLSTATE_CONNECTION_NOT_EXISTS,
})
if client == u'sfsql':
accept_type = ACCEPT_TYPE_APPLICATION_SNOWFLAKE
else:
accept_type = CONTENT_TYPE_APPLICATION_JSON
headers = {
u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
u"accept": accept_type,
u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
}
if method == u'post':
return self._post_request(
url, headers, json.dumps(body),
token=self._token, _no_results=_no_results,
timeout=self._connection._network_timeout)
else:
return self._get_request(
url, headers, token=self._token,
timeout=self._connection._network_timeout)
def _renew_session(self):
if not hasattr(self, u'_master_token'):
Error.errorhandler_wrapper(
self._connection, None, DatabaseError,
{
u'msg': u"Connection is closed",
u'errno': ER_CONNECTION_IS_CLOSED,
u'sqlstate': SQLSTATE_CONNECTION_NOT_EXISTS,
})
logger.debug(u'updating session')
logger.debug(u'master_token: %s', self._master_token)
headers = {
u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
u"accept": CONTENT_TYPE_APPLICATION_JSON,
u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
}
request_id = TO_UNICODE(uuid.uuid4())
logger.debug(u'request_id: %s', request_id)
url = u'/session/token-request?' + urlencode({
u'requestId': request_id})
body = {
u"oldSessionToken": self._token,
u"requestType": REQUEST_TYPE_RENEW,
}
self._session = None # invalidate session object
ret = self._post_request(
url, headers, json.dumps(body),
token=self._master_token,
timeout=self._connection._network_timeout)
if ret[u'success'] and u'data' in ret \
and u'sessionToken' in ret[u'data']:
logger.debug(u'success: %s', ret)
self._token = ret[u'data'][u'sessionToken']
self._master_token = ret[u'data'][u'masterToken']
logger.debug(u'updating session completed')
return ret
else:
logger.debug(u'failed: %s', ret)
err = ret[u'message']
if u'data' in ret and u'errorMessage' in ret[u'data']:
err += ret[u'data'][u'errorMessage']
Error.errorhandler_wrapper(
self._connection, None, ProgrammingError,
{
u'msg': err,
u'errno': ER_FAILED_TO_RENEW_SESSION,
u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
})
def _delete_session(self):
if not hasattr(self, u'_master_token'):
Error.errorhandler_wrapper(
self._connection, None, DatabaseError,
{
u'msg': u"Connection is closed",
u'errno': ER_CONNECTION_IS_CLOSED,
u'sqlstate': SQLSTATE_CONNECTION_NOT_EXISTS,
})
url = u'/session?' + urlencode({u'delete': u'true'})
headers = {
u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
u"accept": CONTENT_TYPE_APPLICATION_JSON,
u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
}
body = {}
try:
ret = self._post_request(
url, headers, json.dumps(body),
token=self._token, timeout=5, is_single_thread=True)
if not ret or ret.get(u'success'):
return
err = ret[u'message']
if ret.get(u'data') and ret[u'data'].get(u'errorMessage'):
err += ret[u'data'][u'errorMessage']
# no exception is raised
except Exception as e:
logger.debug('error in deleting session. ignoring...: %s', e)
def _get_request(self, url, headers, token=None, timeout=None):
if 'Content-Encoding' in headers:
del headers['Content-Encoding']
if 'Content-Length' in headers:
del headers['Content-Length']
full_url = u'{protocol}://{host}:{port}{url}'.format(
protocol=self._protocol,
host=self._host,
port=self._port,
url=url,
)
ret = self.fetch(u'get', full_url, headers, timeout=timeout,
token=token)
if u'code' in ret and ret[u'code'] == SESSION_EXPIRED_GS_CODE:
ret = self._renew_session()
logger.debug(
u'ret[code] = {code} after renew_session'.format(
code=(ret[u'code'] if u'code' in ret else u'N/A')))
if u'success' in ret and ret[u'success']:
return self._get_request(url, headers, token=self._token)
return ret
def _post_request(self, url, headers, body, token=None,
timeout=None, _no_results=False, is_single_thread=False):
full_url = u'{protocol}://{host}:{port}{url}'.format(
protocol=self._protocol,
host=self._host,
port=self._port,
url=url,
)
ret = self.fetch(u'post', full_url, headers, data=body,
timeout=timeout, token=token,
is_single_thread=is_single_thread)
logger.debug(
u'ret[code] = {code}, after post request'.format(
code=(ret.get(u'code', u'N/A'))))
if u'code' in ret and ret[u'code'] == SESSION_EXPIRED_GS_CODE:
ret = self._renew_session()
logger.debug(
u'ret[code] = {code} after renew_session'.format(
code=(ret[u'code'] if u'code' in ret else u'N/A')))
if u'success' in ret and ret[u'success']:
return self._post_request(
url, headers, body, token=self._token, timeout=timeout)
is_session_renewed = False
result_url = None
if u'code' in ret and ret[
u'code'] == QUERY_IN_PROGRESS_ASYNC_CODE and _no_results:
return ret
while is_session_renewed or u'code' in ret and ret[u'code'] in \
(QUERY_IN_PROGRESS_CODE, QUERY_IN_PROGRESS_ASYNC_CODE):
if self._injectClientPause > 0:
logger.debug(
u'waiting for {inject_client_pause}...'.format(
inject_client_pause=self._injectClientPause))
time.sleep(self._injectClientPause)
# ping pong
result_url = ret[u'data'][
u'getResultUrl'] if not is_session_renewed else result_url
logger.debug(u'ping pong starting...')
ret = self._get_request(
result_url, headers, token=self._token, timeout=timeout)
logger.debug(
u'ret[code] = %s',
ret[u'code'] if u'code' in ret else u'N/A')
logger.debug(u'ping pong done')
if u'code' in ret and ret[u'code'] == SESSION_EXPIRED_GS_CODE:
ret = self._renew_session()
logger.debug(
u'ret[code] = %s after renew_session',
ret[u'code'] if u'code' in ret else u'N/A')
if u'success' in ret and ret[u'success']:
is_session_renewed = True
else:
is_session_renewed = False
return ret
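    # Flow sketch (added for clarity): long-running queries first come back
    # with code 333333 (in progress) or 333334 (async). The loop above then
    # polls data['getResultUrl'] with GET requests -- renewing the session on
    # code 390112 -- until the final result payload arrives.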
def fetch(self, method, full_url, headers, data=None, timeout=None,
**kwargs):
""" Curried API request with session management. """
if timeout is not None and 'timeouts' in kwargs:
raise TypeError("Mutually exclusive args: timeout, timeouts")
if timeout is None:
timeout = self._request_timeout
timeouts = kwargs.pop('timeouts', (self._connect_timeout,
self._connect_timeout, timeout))
proxies = set_proxies(self._proxy_host, self._proxy_port,
self._proxy_user, self._proxy_password)
with self._use_requests_session() as session:
return self._fetch(session, method, full_url, headers, data,
proxies, timeouts, **kwargs)
def _fetch(self, session, method, full_url, headers, data, proxies,
timeouts=(DEFAULT_CONNECT_TIMEOUT, DEFAULT_CONNECT_TIMEOUT,
DEFAULT_REQUEST_TIMEOUT),
token=NO_TOKEN,
is_raw_text=False,
catch_okta_unauthorized_error=False,
is_raw_binary=False,
is_raw_binary_iterator=True,
use_ijson=False, is_single_thread=False):
""" This is the lowest level of HTTP handling. All arguments culminate
here and the `requests.request` is issued and monitored from this
call using an inline thread for timeout monitoring. """
connection_timeout = timeouts[0:2]
request_timeout = timeouts[2] # total request timeout
request_exec_timeout = 60 # one request thread timeout
conn = self._connection
proxies = set_proxies(conn.rest._proxy_host, conn.rest._proxy_port,
conn.rest._proxy_user, conn.rest._proxy_password)
def request_exec(result_queue):
try:
if not catch_okta_unauthorized_error and data and len(data) > 0:
gzdata = BytesIO()
gzip.GzipFile(fileobj=gzdata, mode=u'wb').write(
data.encode(u'utf-8'))
gzdata.seek(0, 0)
headers['Content-Encoding'] = 'gzip'
input_data = gzdata
else:
input_data = data
raw_ret = session.request(
method=method,
url=full_url,
proxies=proxies,
headers=headers,
data=input_data,
timeout=connection_timeout,
verify=True,
stream=is_raw_binary,
auth=SnowflakeAuth(token),
)
if raw_ret.status_code == OK:
logger.debug(u'SUCCESS')
if is_raw_text:
ret = raw_ret.text
elif is_raw_binary:
raw_data = decompress_raw_data(
raw_ret.raw, add_bracket=True
).decode('utf-8', 'replace')
if not is_raw_binary_iterator:
ret = json.loads(raw_data)
elif not use_ijson:
ret = iter(json.loads(raw_data))
else:
ret = split_rows_from_stream(StringIO(raw_data))
else:
ret = raw_ret.json()
result_queue.put((ret, False))
elif raw_ret.status_code in STATUS_TO_EXCEPTION:
# retryable exceptions
result_queue.put(
(STATUS_TO_EXCEPTION[raw_ret.status_code](), True))
elif raw_ret.status_code == UNAUTHORIZED and \
catch_okta_unauthorized_error:
# OKTA Unauthorized errors
result_queue.put(
(DatabaseError(
msg=(u'Failed to get '
u'authentication by OKTA: '
u'{status}: {reason}'.format(
status=raw_ret.status_code,
reason=raw_ret.reason,
)),
errno=ER_FAILED_TO_CONNECT_TO_DB,
sqlstate=SQLSTATE_CONNECTION_REJECTED),
False))
else:
result_queue.put(
(InterfaceError(
msg=(u"{status} {reason}: "
u"{method} {url}").format(
status=raw_ret.status_code,
reason=raw_ret.reason,
method=method,
url=full_url),
errno=ER_FAILED_TO_REQUEST,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
), False))
except (BadStatusLine,
SSLError,
ProtocolError,
OpenSSL.SSL.SysCallError,
ValueError,
RuntimeError) as err:
                logger.exception(u'error during request: %s', err)
if not isinstance(err, OpenSSL.SSL.SysCallError) or \
err.args[0] in (
errno.ECONNRESET,
errno.ETIMEDOUT,
errno.EPIPE,
-1):
result_queue.put((err, True))
else:
# all other OpenSSL errors are not retryable
result_queue.put((err, False))
except ConnectionError as err:
logger.exception(u'ConnectionError: %s', err)
result_queue.put((OperationalError(
# no full_url is required in the message
# as err includes all information
msg=u'Failed to connect: {0}'.format(err),
errno=ER_FAILED_TO_SERVER,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
), False))
if is_single_thread:
# This is dedicated code for DELETE SESSION when Python exists.
request_result_queue = Queue()
request_exec(request_result_queue)
try:
# don't care about the return value, because no retry and
# no error will show up
_, _ = request_result_queue.get(timeout=request_timeout)
except:
pass
return {}
retry_cnt = 0
while True:
return_object = None
request_result_queue = Queue()
th = Thread(name='RequestExec-%d' % next(self._request_count),
target=request_exec, args=(request_result_queue,))
th.daemon = True
th.start()
try:
logger.debug('request thread timeout: %s, '
'rest of request timeout: %s, '
'retry cnt: %s',
request_exec_timeout,
request_timeout,
retry_cnt + 1)
start_request_thread = time.time()
th.join(timeout=request_exec_timeout)
logger.debug('request thread joined')
if request_timeout is not None:
request_timeout -= min(
int(time.time() - start_request_thread),
request_timeout)
start_get_queue = time.time()
return_object, retryable = request_result_queue.get(
timeout=int(request_exec_timeout / 4))
if request_timeout is not None:
request_timeout -= min(
int(time.time() - start_get_queue), request_timeout)
logger.debug('request thread returned object')
if retryable:
raise RequestRetry()
elif isinstance(return_object, Error):
Error.errorhandler_wrapper(conn, None, return_object)
elif isinstance(return_object, Exception):
Error.errorhandler_wrapper(
conn, None, OperationalError,
{
u'msg': u'Failed to execute request: {0}'.format(
return_object),
u'errno': ER_FAILED_TO_REQUEST,
})
break
except (RequestRetry, AttributeError, EmptyQueue) as e:
# RequestRetry is raised in case of retryable error
# Empty is raised if the result queue is empty
if request_timeout is not None:
sleeping_time = min(2 ** retry_cnt,
min(request_timeout, 16))
else:
sleeping_time = min(2 ** retry_cnt, 16)
if sleeping_time <= 0:
# no more sleeping time
break
if request_timeout is not None:
request_timeout -= sleeping_time
logger.info(
u'retrying: errorclass=%s, '
u'error=%s, '
u'return_object=%s, '
u'counter=%s, '
u'sleeping=%s(s)',
type(e),
e,
return_object,
retry_cnt + 1,
sleeping_time)
time.sleep(sleeping_time)
retry_cnt += 1
if return_object is None:
if data:
try:
decoded_data = json.loads(data)
if decoded_data.get(
'data') and decoded_data['data'].get('PASSWORD'):
# masking the password
decoded_data['data']['PASSWORD'] = '********'
data = json.dumps(decoded_data)
except:
logger.info("data is not JSON")
logger.error(
u'Failed to get the response. Hanging? '
u'method: {method}, url: {url}, headers:{headers}, '
u'data: {data}, proxies: {proxies}'.format(
method=method,
url=full_url,
headers=headers,
data=data,
proxies=proxies
)
)
Error.errorhandler_wrapper(
conn, None, OperationalError,
{
u'msg': u'Failed to get the response. Hanging? '
u'method: {method}, url: {url}, '
u'proxies: {proxies}'.format(
method=method,
url=full_url,
proxies=proxies
),
u'errno': ER_FAILED_TO_REQUEST,
})
elif isinstance(return_object, Error):
Error.errorhandler_wrapper(conn, None, return_object)
elif isinstance(return_object, Exception):
Error.errorhandler_wrapper(
conn, None, OperationalError,
{
u'msg': u'Failed to execute request: {0}'.format(
return_object),
u'errno': ER_FAILED_TO_REQUEST,
})
return return_object
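    # Retry timing sketch (added for clarity): on retryable failures the loop
    # above sleeps min(2 ** retry_cnt, 16) seconds, further capped by the
    # remaining request_timeout, i.e. roughly 1, 2, 4, 8, 16, 16, ... seconds
    # between attempts until the time budget is exhausted.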
def make_requests_session(self):
s = requests.Session()
s.mount(u'http://', HTTPAdapter(max_retries=REQUESTS_RETRY))
s.mount(u'https://', HTTPAdapter(max_retries=REQUESTS_RETRY))
s._reuse_count = itertools.count()
return s
def authenticate_by_saml(self, authenticator, account, user, password):
u"""
SAML Authentication
1. query GS to obtain IDP token and SSO url
2. IMPORTANT Client side validation:
validate both token url and sso url contains same prefix
(protocol + host + port) as the given authenticator url.
Explanation:
This provides a way for the user to 'authenticate' the IDP it is
sending his/her credentials to. Without such a check, the user could
be coerced to provide credentials to an IDP impersonator.
3. query IDP token url to authenticate and retrieve access token
4. given access token, query IDP URL snowflake app to get SAML response
5. IMPORTANT Client side validation:
validate the post back url come back with the SAML response
contains the same prefix as the Snowflake's server url, which is the
intended destination url to Snowflake.
Explanation:
This emulates the behavior of IDP initiated login flow in the user
browser where the IDP instructs the browser to POST the SAML
assertion to the specific SP endpoint. This is critical in
preventing a SAML assertion issued to one SP from being sent to
another SP.
"""
logger.info(u'authenticating by SAML')
logger.debug(u'step 1: query GS to obtain IDP token and SSO url')
headers = {
u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
u"accept": CONTENT_TYPE_APPLICATION_JSON,
u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
}
url = u"/session/authenticator-request"
body = {
u'data': {
u"CLIENT_APP_ID": CLIENT_NAME,
u"CLIENT_APP_VERSION": CLIENT_VERSION,
u"SVN_REVISION": VERSION[3],
u"ACCOUNT_NAME": account,
u"AUTHENTICATOR": authenticator,
},
}
logger.debug(
u'account=%s, authenticator=%s',
account, authenticator,
)
ret = self._post_request(
url, headers, json.dumps(body),
timeout=self._connection._login_timeout)
if not ret[u'success']:
Error.errorhandler_wrapper(
self._connection, None, DatabaseError,
{
u'msg': (u"Failed to connect to DB: {host}:{port}, "
u"proxies={proxy_host}:{proxy_port}, "
u"proxy_user={proxy_user}, "
u"{message}").format(
host=self._host,
port=self._port,
proxy_host=self._proxy_host,
proxy_port=self._proxy_port,
proxy_user=self._proxy_user,
message=ret[u'message'],
),
u'errno': ER_FAILED_TO_CONNECT_TO_DB,
u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
})
data = ret[u'data']
token_url = data[u'tokenUrl']
sso_url = data[u'ssoUrl']
logger.debug(u'step 2: validate Token and SSO URL has the same prefix '
u'as authenticator')
if not _is_prefix_equal(authenticator, token_url) or \
not _is_prefix_equal(authenticator, sso_url):
Error.errorhandler_wrapper(
self._connection, None, DatabaseError,
{
u'msg': (u"The specified authenticator is not supported: "
u"{authenticator}, token_url: {token_url}, "
u"sso_url: {sso_url}".format(
authenticator=authenticator,
token_url=token_url,
sso_url=sso_url,
)),
u'errno': ER_IDP_CONNECTION_ERROR,
u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
logger.debug(u'step 3: query IDP token url to authenticate and '
u'retrieve access token')
data = {
u'username': user,
u'password': password,
}
ret = self.fetch(u'post', token_url, headers, data=json.dumps(data),
timeout=self._connection._login_timeout,
catch_okta_unauthorized_error=True)
one_time_token = ret[u'cookieToken']
logger.debug(u'step 4: query IDP URL snowflake app to get SAML '
u'response')
url_parameters = {
u'RelayState': u"/some/deep/link",
u'onetimetoken': one_time_token,
}
sso_url = sso_url + u'?' + urlencode(url_parameters)
headers = {
u"Accept": u'*/*',
}
response_html = self.fetch(u'get', sso_url, headers,
timeout=self._connection._login_timeout,
is_raw_text=True)
logger.debug(u'step 5: validate post_back_url matches Snowflake URL')
post_back_url = _get_post_back_url_from_html(response_html)
full_url = u'{protocol}://{host}:{port}'.format(
protocol=self._protocol,
host=self._host,
port=self._port,
)
if not _is_prefix_equal(post_back_url, full_url):
Error.errorhandler_wrapper(
self._connection, None, DatabaseError,
{
u'msg': (u"The specified authenticator and destination "
u"URL in the SAML assertion do not match: "
u"expected: {url}, "
u"post back: {post_back_url}".format(
url=full_url,
post_back_url=post_back_url,
)),
u'errno': ER_INCORRECT_DESTINATION,
u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
return response_html
@contextlib.contextmanager
def _use_requests_session(self):
""" Session caching context manager. Note that the session is not
closed until close() is called so each session may be used multiple
times. """
try:
session = self._idle_sessions.pop()
except IndexError:
session = self.make_requests_session()
self._active_sessions.add(session)
logger.info("Active requests sessions: %s, idle: %s",
len(self._active_sessions), len(self._idle_sessions))
try:
yield session
finally:
self._idle_sessions.appendleft(session)
try:
self._active_sessions.remove(session)
except KeyError:
logger.info(
"session doesn't exist in the active session pool. "
"Ignored...")
logger.info("Active requests sessions: %s, idle: %s",
len(self._active_sessions), len(self._idle_sessions))
| apache-2.0 | -5,605,493,169,981,819,000 | 40.409427 | 81 | 0.520009 | false | 4.300317 | false | false | false |
qnorsten/svtplay-dl | lib/svtplay_dl/service/oppetarkiv.py | 1 | 9577 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import copy
import os
import hashlib
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.error import ServiceError
from svtplay_dl.log import log
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.dash import dashparse
from svtplay_dl.utils import ensure_unicode, filenamify, is_py2, decode_html_entities
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.urllib import urlparse, parse_qs
from svtplay_dl.info import info
class OppetArkiv(Service, OpenGraphThumbMixin):
supported_domains = ['oppetarkiv.se']
def get(self):
vid = self.find_video_id()
if vid is None:
yield ServiceError("Cant find video id for this video")
return
url = "http://api.svt.se/videoplayer-api/video/%s" % vid
data = self.http.request("get", url)
if data.status_code == 404:
yield ServiceError("Can't get the json file for %s" % url)
return
data = data.json()
if "live" in data:
self.options.live = data["live"]
if self.options.output_auto:
self.options.service = "svtplay"
self.options.output = self.outputfilename(data, self.options.output, ensure_unicode(self.get_urldata()))
if self.exclude():
yield ServiceError("Excluding video")
return
parsed_info = self._parse_info(data)
if self.options.get_info:
if parsed_info:
yield info(copy.copy(self.options), parsed_info)
log.info("Collected info")
else:
log.info("Couldn't collect info for this episode")
if "subtitleReferences" in data:
for i in data["subtitleReferences"]:
if i["format"] == "websrt":
yield subtitle(copy.copy(self.options), "wrst", i["url"])
if len(data["videoReferences"]) == 0:
yield ServiceError("Media doesn't have any associated videos (yet?)")
return
for i in data["videoReferences"]:
parse = urlparse(i["url"])
query = parse_qs(parse.query)
if i["format"] == "hls" or i["format"] == "ios":
streams = hlsparse(self.options, self.http.request("get", i["url"]), i["url"])
if streams:
for n in list(streams.keys()):
yield streams[n]
if "alt" in query and len(query["alt"]) > 0:
alt = self.http.get(query["alt"][0])
if alt:
streams = hlsparse(self.options, self.http.request("get", alt.request.url), alt.request.url)
if streams:
for n in list(streams.keys()):
yield streams[n]
if i["format"] == "hds" or i["format"] == "flash":
match = re.search(r"\/se\/secure\/", i["url"])
if not match:
streams = hdsparse(self.options, self.http.request("get", i["url"], params={"hdcore": "3.7.0"}),
i["url"])
if streams:
for n in list(streams.keys()):
yield streams[n]
if "alt" in query and len(query["alt"]) > 0:
alt = self.http.get(query["alt"][0])
if alt:
streams = hdsparse(self.options,
self.http.request("get", alt.request.url, params={"hdcore": "3.7.0"}),
alt.request.url)
if streams:
for n in list(streams.keys()):
yield streams[n]
if i["format"] == "dash264" or i["format"] == "dashhbbtv":
streams = dashparse(self.options, self.http.request("get", i["url"]), i["url"])
if streams:
for n in list(streams.keys()):
yield streams[n]
if "alt" in query and len(query["alt"]) > 0:
alt = self.http.get(query["alt"][0])
if alt:
streams = dashparse(self.options, self.http.request("get", alt.request.url), alt.request.url)
if streams:
for n in list(streams.keys()):
yield streams[n]
def find_video_id(self):
match = re.search('data-video-id="([^"]+)"', self.get_urldata())
if match:
return match.group(1)
return None
def find_all_episodes(self, options):
page = 1
data = self.get_urldata()
match = re.search(r'"/etikett/titel/([^"/]+)', data)
if match is None:
match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
if match is None:
log.error("Couldn't find title")
return
program = match.group(1)
episodes = []
n = 0
if self.options.all_last > 0:
sort = "tid_fallande"
else:
sort = "tid_stigande"
while True:
url = "http://www.oppetarkiv.se/etikett/titel/%s/?sida=%s&sort=%s&embed=true" % (program, page, sort)
data = self.http.request("get", url)
if data.status_code == 404:
break
data = data.text
regex = re.compile(r'href="(/video/[^"]+)"')
for match in regex.finditer(data):
if n == self.options.all_last:
break
episodes.append("http://www.oppetarkiv.se%s" % match.group(1))
n += 1
page += 1
return episodes
def outputfilename(self, data, filename, raw):
directory = os.path.dirname(filename)
if is_py2:
id = hashlib.sha256(data["programVersionId"]).hexdigest()[:7]
else:
id = hashlib.sha256(data["programVersionId"].encode("utf-8")).hexdigest()[:7]
datatitle = re.search('data-title="([^"]+)"', self.get_urldata())
if not datatitle:
return None
datat = decode_html_entities(datatitle.group(1))
name = self.name(datat)
episode = self.seasoninfo(datat)
if is_py2:
name = name.encode("utf8")
if episode:
title = "{0}.{1}-{2}-svtplay".format(name, episode, id)
else:
title = "{0}-{1}-svtplay".format(name, id)
title = filenamify(title)
if len(directory):
output = os.path.join(directory, title)
else:
output = title
return output
def seasoninfo(self, data):
episode = None
match = re.search("S.song (\d+) - Avsnitt (\d+)", data)
if match:
episode = "s{0:02d}e{1:02d}".format(int(match.group(1)), int(match.group(2)))
else:
match = re.search("Avsnitt (\d+)", data)
if match:
episode = "e{0:02d}".format(int(match.group(1)))
return episode
def name(selfs, data):
if data.find(" - S.song") > 0:
title = data[:data.find(" - S.song")]
else:
if data.find(" - Avsnitt") > 0:
title = data[:data.find(" - Avsnitt")]
else:
title = data
return title
def _parse_info(self,json_data):
parsed_info = {}
data = self.get_urldata()
datatitle = re.search('data-title="([^"]+)"', self.get_urldata())
if not datatitle:
return None
datat = decode_html_entities(datatitle.group(1))
parsed_info["title"] = self.name(datat)
match = re.search("S.song (\d+) - Avsnitt (\d+)", datat)
if match:
parsed_info["season"] = match.group(1)
parsed_info["episode"] = match.group(2)
else:
match = re.search("Avsnitt (\d+)", datat)
if match:
parsed_info["episode"] = match.group(1)
meta = re.search("<span class=\"svt-video-meta\">([\w\W]+)<\/span>",data)
if meta:
broadcast_date = re.search("<time .+>(.+)</time>",meta.group(1))
duration = re.search("Längd <strong>(.+)<\/strong>",meta.group(1))
if duration:
parsed_info["duration"] = duration.group(1)
if broadcast_date:
parsed_info["broadcastDate"] = broadcast_date.group(1)
if "subtitleReferences" in json_data:
for i in json_data["subtitleReferences"]:
if i["format"] == "websrt":
parsed_info["subtitle"] = "True"
break
description = re.search("<div class=\"svt-text-bread\">([\w\W]+?)<\/div>",data)
if description:
description = decode_html_entities(description.group(1))
description = description.replace("<br />","\n").replace("<br>","\n").replace("\t","")
description = re.sub('<[^<]+?>', '', description)
parsed_info["description"] = description
return parsed_info
| mit | 8,521,074,660,413,249,000 | 38.570248 | 117 | 0.500104 | false | 3.958661 | false | false | false |
Pierre-Thibault/neo-insert-imports | test/result_dir/blank_lines_only.py | 1 | 6254 |
# Static analyzer import helpers: (STATIC_IMPORT_MARK)
if 0:
import gluon
global cache; cache = gluon.cache.Cache()
global LOAD; LOAD = gluon.compileapp.LoadFactory()
import gluon.compileapp.local_import_aux as local_import #@UnusedImport
from gluon.contrib.gql import GQLDB #@UnusedImport
from gluon.dal import Field #@UnusedImport
global request; request = gluon.globals.Request()
global response; response = gluon.globals.Response()
global session; session = gluon.globals.Session()
from gluon.html import A #@UnusedImport
from gluon.html import B #@UnusedImport
from gluon.html import BEAUTIFY #@UnusedImport
from gluon.html import BODY #@UnusedImport
from gluon.html import BR #@UnusedImport
from gluon.html import CENTER #@UnusedImport
from gluon.html import CODE #@UnusedImport
from gluon.html import DIV #@UnusedImport
from gluon.html import EM #@UnusedImport
from gluon.html import EMBED #@UnusedImport
from gluon.html import embed64 #@UnusedImport
from gluon.html import FIELDSET #@UnusedImport
from gluon.html import FORM #@UnusedImport
from gluon.html import H1 #@UnusedImport
from gluon.html import H2 #@UnusedImport
from gluon.html import H3 #@UnusedImport
from gluon.html import H4 #@UnusedImport
from gluon.html import H5 #@UnusedImport
from gluon.html import H6 #@UnusedImport
from gluon.html import HEAD #@UnusedImport
from gluon.html import HR #@UnusedImport
from gluon.html import HTML #@UnusedImport
from gluon.html import I #@UnusedImport
from gluon.html import IFRAME #@UnusedImport
from gluon.html import IMG #@UnusedImport
from gluon.html import INPUT #@UnusedImport
from gluon.html import LABEL #@UnusedImport
from gluon.html import LEGEND #@UnusedImport
from gluon.html import LI #@UnusedImport
from gluon.html import LINK #@UnusedImport
from gluon.html import MARKMIN #@UnusedImport
from gluon.html import MENU #@UnusedImport
from gluon.html import META #@UnusedImport
from gluon.html import OBJECT #@UnusedImport
from gluon.html import OL #@UnusedImport
from gluon.html import ON #@UnusedImport
from gluon.html import OPTGROUP #@UnusedImport
from gluon.html import OPTION #@UnusedImport
from gluon.html import P #@UnusedImport
from gluon.html import PRE #@UnusedImport
from gluon.html import STYLE #@UnusedImport
from gluon.html import SCRIPT #@UnusedImport
from gluon.html import SELECT #@UnusedImport
from gluon.html import SPAN #@UnusedImport
from gluon.html import TABLE #@UnusedImport
from gluon.html import TAG #@UnusedImport
from gluon.html import TBODY #@UnusedImport
from gluon.html import TD #@UnusedImport
from gluon.html import TEXTAREA #@UnusedImport
from gluon.html import TFOOT #@UnusedImport
from gluon.html import TH #@UnusedImport
from gluon.html import THEAD #@UnusedImport
from gluon.html import TITLE #@UnusedImport
from gluon.html import TR #@UnusedImport
from gluon.html import TT #@UnusedImport
from gluon.html import UL #@UnusedImport
from gluon.html import URL #@UnusedImport
from gluon.html import XHTML #@UnusedImport
from gluon.html import XML #@UnusedImport
from gluon.html import xmlescape #@UnusedImport
from gluon.http import HTTP #@UnusedImport
from gluon.http import redirect #@UnusedImport
import gluon.languages.translator as T #@UnusedImport
from gluon.sql import DAL
global db; db = DAL()
from gluon.sql import SQLDB #@UnusedImport
from gluon.sql import SQLField #@UnusedImport
from gluon.sqlhtml import SQLFORM #@UnusedImport
from gluon.sqlhtml import SQLTABLE #@UnusedImport
from gluon.tools import Auth
global auth; auth = Auth()
from gluon.tools import Crud
global crud; crud = Crud()
from gluon.tools import fetch #@UnusedImport
from gluon.tools import geocode #@UnusedImport
from gluon.tools import Mail
global mail; mail = Mail()
from gluon.tools import PluginManager
global plugins; plugins = PluginManager()
from gluon.tools import prettydate #@UnusedImport
from gluon.tools import Recaptcha #@UnusedImport
from gluon.tools import Service
global service; service = Service()
from gluon.validators import CLEANUP #@UnusedImport
from gluon.validators import CRYPT #@UnusedImport
from gluon.validators import IS_ALPHANUMERIC #@UnusedImport
from gluon.validators import IS_DATE #@UnusedImport
from gluon.validators import IS_DATE_IN_RANGE #@UnusedImport
from gluon.validators import IS_DATETIME #@UnusedImport
from gluon.validators import IS_DATETIME_IN_RANGE #@UnusedImport
from gluon.validators import IS_DECIMAL_IN_RANGE #@UnusedImport
from gluon.validators import IS_EMAIL #@UnusedImport
from gluon.validators import IS_EMPTY_OR #@UnusedImport
from gluon.validators import IS_EQUAL_TO #@UnusedImport
from gluon.validators import IS_EXPR #@UnusedImport
from gluon.validators import IS_FLOAT_IN_RANGE #@UnusedImport
from gluon.validators import IS_IMAGE #@UnusedImport
from gluon.validators import IS_IN_DB #@UnusedImport
from gluon.validators import IS_IN_SET #@UnusedImport
from gluon.validators import IS_INT_IN_RANGE #@UnusedImport
from gluon.validators import IS_IPV4 #@UnusedImport
from gluon.validators import IS_LENGTH #@UnusedImport
from gluon.validators import IS_LIST_OF #@UnusedImport
from gluon.validators import IS_LOWER #@UnusedImport
from gluon.validators import IS_MATCH #@UnusedImport
from gluon.validators import IS_NOT_EMPTY #@UnusedImport
from gluon.validators import IS_NOT_IN_DB #@UnusedImport
from gluon.validators import IS_NULL_OR #@UnusedImport
from gluon.validators import IS_SLUG #@UnusedImport
from gluon.validators import IS_STRONG #@UnusedImport
from gluon.validators import IS_TIME #@UnusedImport
from gluon.validators import IS_UPLOAD_FILENAME #@UnusedImport
from gluon.validators import IS_UPPER #@UnusedImport
from gluon.validators import IS_URL #@UnusedImport
| mit | 7,308,186,558,216,996,000 | 43.671429 | 75 | 0.738887 | false | 4.373427 | false | true | false |
fenglb/mysite | eguard/migrations/0002_auto_20160220_1853.py | 1 | 2429 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-20 10:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('eguard', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField(verbose_name='\u65f6\u95f4')),
],
),
migrations.RemoveField(
model_name='user',
name='entrance',
),
migrations.RemoveField(
model_name='user',
name='group',
),
migrations.RemoveField(
model_name='entrance',
name='name',
),
migrations.RemoveField(
model_name='entrance',
name='position',
),
migrations.RemoveField(
model_name='entrance',
name='webId',
),
migrations.AddField(
model_name='entrance',
name='code',
field=models.CharField(choices=[(b'D500', '\u9760500M\u6838\u78c1\u5ba4\u95e8'), (b'D102', '\u503c\u73ed\u5ba4\u5927\u95e8'), (b'D103', '\u503c\u73ed\u5ba4\u91cc\u95e8'), (b'D600', '\u9760600M\u6838\u78c1\u5ba4\u95e8')], default=b'D102', max_length=4, verbose_name='\u95e8\u7981'),
),
migrations.AddField(
model_name='entrance',
name='users',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='\u51fa\u5165\u8005'),
),
migrations.DeleteModel(
name='Team',
),
migrations.DeleteModel(
name='User',
),
migrations.AddField(
model_name='event',
name='entrace',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='eguard.Entrance', verbose_name='\u95e8'),
),
migrations.AddField(
model_name='event',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u4eba'),
),
]
| cc0-1.0 | -6,512,004,272,363,192,000 | 33.211268 | 293 | 0.561136 | false | 3.731183 | false | false | false |
matthew-brett/draft-statsmodels | scikits/statsmodels/sandbox/tsa/tsatools.py | 1 | 4014 | import numpy as np
def lagmat(x, maxlag, trim='forward'):
'''create 2d array of lags
Parameters
----------
x : array_like, 1d or 2d
data; if 2d, observation in rows and variables in columns
maxlag : int
all lags from zero to maxlag are included
trim : str {'forward', 'backward', 'both', 'none'} or None
* 'forward' : trim invalid observations in front
* 'backward' : trim invalid initial observations
* 'both' : trim invalid observations on both sides
* 'none', None : no trimming of observations
Returns
-------
lagmat : 2d array
array with lagged observations
Examples
--------
>>> from scikits.statsmodels.sandbox.tsa.tsatools import lagmat
>>> import numpy as np
>>> X = np.arange(1,7).reshape(-1,2)
>>> lagmat(X, maxlag=2, trim="forward")
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="backward")
array([[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
>>> lagmat(X, maxlag=2, trim="both"
array([[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="none")
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
Notes
-----
TODO:
* allow list of lags additional to maxlag
* create varnames for columns
'''
x = np.asarray(x)
if x.ndim == 1:
x = x[:,None]
nobs, nvar = x.shape
if maxlag >= nobs:
raise ValueError("maxlag should be < nobs")
lm = np.zeros((nobs+maxlag, nvar*(maxlag+1)))
for k in range(0, int(maxlag+1)):
#print k, maxlag-k,nobs-k, nvar*k,nvar*(k+1), x.shape, lm.shape
lm[maxlag-k:nobs+maxlag-k, nvar*(maxlag-k):nvar*(maxlag-k+1)] = x
if trim:
trimlower = trim.lower()
else:
trimlower = trim
if trimlower == 'none' or not trimlower:
return lm
elif trimlower == 'forward':
return lm[:nobs+maxlag-k,:]
elif trimlower == 'both':
return lm[maxlag:nobs+maxlag-k,:]
elif trimlower == 'backward':
return lm[maxlag:,:]
else:
        raise ValueError('trim option not valid')
def lagmat2ds(x, maxlag0, maxlagex=None, dropex=0, trim='forward'):
'''generate lagmatrix for 2d array, columns arranged by variables
Parameters
----------
x : array_like, 2d
2d data, observation in rows and variables in columns
maxlag0 : int
for first variable all lags from zero to maxlag are included
maxlagex : None or int
max lag for all other variables all lags from zero to maxlag are included
dropex : int (default is 0)
exclude first dropex lags from other variables
for all variables, except the first, lags from dropex to maxlagex are included
trim : string
* 'forward' : trim invalid observations in front
* 'backward' : trim invalid initial observations
* 'both' : trim invalid observations on both sides
* 'none' : no trimming of observations
Returns
-------
lagmat : 2d array
array with lagged observations, columns ordered by variable
Notes
-----
very inefficient for unequal lags, just done for convenience
'''
if maxlagex is None:
maxlagex = maxlag0
maxlag = max(maxlag0, maxlagex)
nobs, nvar = x.shape
lagsli = [lagmat(x[:,0], maxlag, trim=trim)[:,:maxlag0]]
for k in range(1,nvar):
lagsli.append(lagmat(x[:,k], maxlag, trim=trim)[:,dropex:maxlagex])
return np.column_stack(lagsli)
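# Illustrative shape check for lagmat2ds (hedged, not from the original
# source): with x of shape (6, 2) and maxlag0=2, the first variable
# contributes lags 0..1 and the second variable lags 0..1 as well, so
# lagmat2ds(x, 2) returns an array of shape (6, 4).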
__all__ = ['lagmat', 'lagmat2ds']
if __name__ == '__main__':
# sanity check, mainly for imports
x = np.random.normal(size=(100,2))
tmp = lagmat(x,2)
tmp = lagmat2ds(x,2)
# grangercausalitytests(x, 2)
| bsd-3-clause | 6,150,457,829,898,565,000 | 31.112 | 86 | 0.555805 | false | 3.234488 | false | false | false |
EzyInsights/Diamond | src/collectors/nginx/nginx.py | 15 | 3235 | # coding=utf-8
"""
Collect statistics from Nginx
#### Dependencies
* urllib2
#### Usage
To enable the nginx status page to work with defaults,
add a file to /etc/nginx/sites-enabled/ (on Ubuntu) with the
following content:
<pre>
server {
listen 127.0.0.1:8080;
server_name localhost;
location /nginx_status {
stub_status on;
access_log /data/server/shared/log/access.log;
allow 127.0.0.1;
deny all;
}
}
</pre>
"""
import urllib2
import re
import diamond.collector
class NginxCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(NginxCollector, self).get_default_config_help()
config_help.update({
'req_host': 'Hostname',
'req_port': 'Port',
'req_path': 'Path',
})
return config_help
def get_default_config(self):
default_config = super(NginxCollector, self).get_default_config()
default_config['req_host'] = 'localhost'
default_config['req_port'] = 8080
default_config['req_path'] = '/nginx_status'
default_config['path'] = 'nginx'
return default_config
def collect(self):
url = 'http://%s:%i%s' % (self.config['req_host'],
int(self.config['req_port']),
self.config['req_path'])
activeConnectionsRE = re.compile(r'Active connections: (?P<conn>\d+)')
totalConnectionsRE = re.compile('^\s+(?P<conn>\d+)\s+' +
'(?P<acc>\d+)\s+(?P<req>\d+)')
connectionStatusRE = re.compile('Reading: (?P<reading>\d+) ' +
'Writing: (?P<writing>\d+) ' +
'Waiting: (?P<waiting>\d+)')
req = urllib2.Request(url)
try:
handle = urllib2.urlopen(req)
for l in handle.readlines():
l = l.rstrip('\r\n')
if activeConnectionsRE.match(l):
self.publish_gauge(
'active_connections',
int(activeConnectionsRE.match(l).group('conn')))
elif totalConnectionsRE.match(l):
m = totalConnectionsRE.match(l)
req_per_conn = float(m.group('req')) / \
float(m.group('acc'))
self.publish_counter('conn_accepted', int(m.group('conn')))
self.publish_counter('conn_handled', int(m.group('acc')))
self.publish_counter('req_handled', int(m.group('req')))
self.publish_gauge('req_per_conn', float(req_per_conn))
elif connectionStatusRE.match(l):
m = connectionStatusRE.match(l)
self.publish_gauge('act_reads', int(m.group('reading')))
self.publish_gauge('act_writes', int(m.group('writing')))
self.publish_gauge('act_waits', int(m.group('waiting')))
except IOError, e:
self.log.error("Unable to open %s" % url)
except Exception, e:
self.log.error("Unknown error opening url: %s", e)
| mit | 779,631,892,434,717,700 | 35.348315 | 79 | 0.520247 | false | 3.954768 | true | false | false |
Quantipy/quantipy | savReaderWriter/__init__.py | 1 | 4643 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
savReaderWriter: A cross-platform Python interface to the IBM SPSS
Statistics Input Output Module. Read or Write SPSS system files (.sav, .zsav)
.. moduleauthor:: Albert-Jan Roskam <fomcl "at" yahoo "dot" com>
"""
# change this to 'True' in case you experience segmentation
# faults related to freeing memory.
segfaults = False
import os
import sys
try:
import psyco
psycoOk = True # reading 66 % faster
except ImportError:
psycoOk = False
try:
import numpy
numpyOk = True
except ImportError:
numpyOk = False
try:
from cWriterow import cWriterow # writing 66 % faster
cWriterowOK = True
except ImportError:
cWriterowOK = False
__author__ = "Albert-Jan Roskam" + " " + "@".join(["fomcl", "yahoo.com"])
__version__ = open(os.path.join(os.path.dirname(__file__),
"VERSION")).read().strip()
allFormats = {
1: (b"SPSS_FMT_A", b"Alphanumeric"),
2: (b"SPSS_FMT_AHEX", b"Alphanumeric hexadecimal"),
3: (b"SPSS_FMT_COMMA", b"F Format with commas"),
4: (b"SPSS_FMT_DOLLAR", b"Commas and floating dollar sign"),
5: (b"SPSS_FMT_F", b"Default Numeric Format"),
6: (b"SPSS_FMT_IB", b"Integer binary"),
7: (b"SPSS_FMT_PIBHEX", b"Positive integer binary - hex"),
8: (b"SPSS_FMT_P", b"Packed decimal"),
9: (b"SPSS_FMT_PIB", b"Positive integer binary unsigned"),
10: (b"SPSS_FMT_PK", b"Positive integer binary unsigned"),
11: (b"SPSS_FMT_RB", b"Floating point binary"),
12: (b"SPSS_FMT_RBHEX", b"Floating point binary hex"),
15: (b"SPSS_FMT_Z", b"Zoned decimal"),
16: (b"SPSS_FMT_N", b"N Format- unsigned with leading 0s"),
17: (b"SPSS_FMT_E", b"E Format- with explicit power of 10"),
20: (b"SPSS_FMT_DATE", b"Date format dd-mmm-yyyy"),
21: (b"SPSS_FMT_TIME", b"Time format hh:mm:ss.s"),
22: (b"SPSS_FMT_DATETIME", b"Date and Time"),
23: (b"SPSS_FMT_ADATE", b"Date format dd-mmm-yyyy"),
24: (b"SPSS_FMT_JDATE", b"Julian date - yyyyddd"),
25: (b"SPSS_FMT_DTIME", b"Date-time dd hh:mm:ss.s"),
26: (b"SPSS_FMT_WKDAY", b"Day of the week"),
27: (b"SPSS_FMT_MONTH", b"Month"),
28: (b"SPSS_FMT_MOYR", b"mmm yyyy"),
29: (b"SPSS_FMT_QYR", b"q Q yyyy"),
30: (b"SPSS_FMT_WKYR", b"ww WK yyyy"),
31: (b"SPSS_FMT_PCT", b"Percent - F followed by %"),
32: (b"SPSS_FMT_DOT", b"Like COMMA, switching dot for comma"),
33: (b"SPSS_FMT_CCA", b"User Programmable currency format"),
34: (b"SPSS_FMT_CCB", b"User Programmable currency format"),
35: (b"SPSS_FMT_CCC", b"User Programmable currency format"),
36: (b"SPSS_FMT_CCD", b"User Programmable currency format"),
37: (b"SPSS_FMT_CCE", b"User Programmable currency format"),
38: (b"SPSS_FMT_EDATE", b"Date in dd/mm/yyyy style"),
39: (b"SPSS_FMT_SDATE", b"Date in yyyy/mm/dd style")}
MAXLENGTHS = {
"SPSS_MAX_VARNAME": (64, "Variable name"),
"SPSS_MAX_SHORTVARNAME": (8, "Short (compatibility) variable name"),
"SPSS_MAX_SHORTSTRING": (8, "Short string variable"),
"SPSS_MAX_IDSTRING": (64, "File label string"),
"SPSS_MAX_LONGSTRING": (32767, "Long string variable"),
"SPSS_MAX_VALLABEL": (120, "Value label"),
"SPSS_MAX_VARLABEL": (256, "Variable label"),
"SPSS_MAX_7SUBTYPE": (40, "Maximum record 7 subtype"),
"SPSS_MAX_ENCODING": (64, "Maximum encoding text")}
supportedDates = { # uses ISO dates wherever applicable.
b"DATE": "%Y-%m-%d",
b"JDATE": "%Y-%m-%d",
b"EDATE": "%Y-%m-%d",
b"SDATE": "%Y-%m-%d",
b"DATETIME": "%Y-%m-%d %H:%M:%S",
b"ADATE": "%Y-%m-%d",
b"WKDAY": "%A",
b"MONTH": "%B",
b"MOYR": "%B %Y",
b"WKYR": "%W WK %Y",
b"QYR": "%m Q %Y", # %m (month) is converted to quarter, see next dict.
b"TIME": "%H:%M:%S.%f",
b"DTIME": "%d %H:%M:%S"}
QUARTERS = {b'01': b'1', b'02': b'1', b'03': b'1',
b'04': b'2', b'05': b'2', b'06': b'2',
b'07': b'3', b'08': b'3', b'09': b'3',
b'10': b'4', b'11': b'4', b'12': b'4'}
userMissingValues = {
"SPSS_NO_MISSVAL": 0,
"SPSS_ONE_MISSVAL": 1,
"SPSS_TWO_MISSVAL": 2,
"SPSS_THREE_MISSVAL": 3,
"SPSS_MISS_RANGE": -2,
"SPSS_MISS_RANGEANDVAL": -3}
version = __version__
sys.path.insert(0, os.path.dirname(__file__))
from py3k import *
from error import *
from generic import *
from header import *
from savReader import *
from savWriter import *
from savHeaderReader import *
__all__ = ["SavReader", "SavWriter", "SavHeaderReader"]
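# Illustrative usage (hedged sketch; "somefile.sav" and process() are
# placeholders -- see the SavReader docstring for the authoritative API):
#
#   with SavReader("somefile.sav") as reader:
#       for record in reader:
#           process(record)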
| mit | 8,820,088,647,539,985,000 | 34.273438 | 77 | 0.58432 | false | 2.696283 | false | false | false |
henniggroup/GASP-python | gasp/post_processing/plotter.py | 1 | 6437 | # coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
"""
Plotter module:
This module contains the Plotter class, which is used to plot various data
from the genetic algorithm structure search.
"""
from pymatgen.core.composition import Composition
from pymatgen.phasediagram.entries import PDEntry
from pymatgen.phasediagram.maker import CompoundPhaseDiagram
from pymatgen.phasediagram.plotter import PDPlotter
import matplotlib.pyplot as plt
import os
class Plotter(object):
"""
    Used to plot various data from a structure search.
"""
def __init__(self, data_file_path):
"""
Makes a Plotter.
Args:
            data_file_path: the path to the file (called run_data) containing the
data for the search
"""
# get the input file contents
input_file = os.path.abspath(data_file_path)
try:
with open(input_file) as input_data:
self.lines = input_data.readlines()
except:
print('Error reading data file.')
print('Quitting...')
quit()
def get_progress_plot(self):
"""
Returns a plot of the best value versus the number of energy
calculations, as a matplotlib plot object.
"""
# set the font to Times, rendered with Latex
plt.rc('font', **{'family': 'serif', 'serif': ['Times']})
plt.rc('text', usetex=True)
# parse the number of composition space endpoints
endpoints_line = self.lines[0].split()
endpoints = []
for word in endpoints_line[::-1]:
if word == 'endpoints:':
break
else:
endpoints.append(word)
num_endpoints = len(endpoints)
if num_endpoints == 1:
y_label = r'Best value (eV/atom)'
elif num_endpoints == 2:
y_label = r'Area of convex hull'
else:
y_label = r'Volume of convex hull'
# parse the best values and numbers of energy calculations
best_values = []
num_calcs = []
for i in range(4, len(self.lines)):
line = self.lines[i].split()
num_calcs.append(int(line[4]))
best_values.append(line[5])
        # drop entries whose best value is 'None'; delete from the end so the
        # remaining indices stay valid even when several values are 'None'
        none_indices = [i for i, value in enumerate(best_values)
                        if value == 'None']
        for index in reversed(none_indices):
            del best_values[index]
            del num_calcs[index]
# make the plot
plt.plot(num_calcs, best_values, color='blue', linewidth=2)
plt.xlabel(r'Number of energy calculations', fontsize=22)
plt.ylabel(y_label, fontsize=22)
plt.tick_params(which='both', width=1, labelsize=18)
plt.tick_params(which='major', length=8)
plt.tick_params(which='minor', length=4)
plt.xlim(xmin=0)
plt.tight_layout()
return plt
def plot_progress(self):
"""
Plots the best value versus the number of energy calculations.
"""
self.get_progress_plot().show()
def get_system_size_plot(self):
"""
Returns a plot of the system size versus the number of energy
calculations, as a matplotlib plot object.
"""
# set the font to Times, rendered with Latex
plt.rc('font', **{'family': 'serif', 'serif': ['Times']})
plt.rc('text', usetex=True)
# parse the compositions and numbers of energy calculations
compositions = []
num_calcs = []
for i in range(4, len(self.lines)):
line = self.lines[i].split()
compositions.append(line[1])
num_calcs.append(int(line[4]))
# get the numbers of atoms from the compositions
nums_atoms = []
for composition in compositions:
comp = Composition(composition)
nums_atoms.append(comp.num_atoms)
# make the plot
plt.plot(num_calcs, nums_atoms, 'D', markersize=5,
markeredgecolor='blue', markerfacecolor='blue')
plt.xlabel(r'Number of energy calculations', fontsize=22)
plt.ylabel(r'Number of atoms in the cell', fontsize=22)
plt.tick_params(which='both', width=1, labelsize=18)
plt.tick_params(which='major', length=8)
plt.tick_params(which='minor', length=4)
plt.xlim(xmin=0)
plt.ylim(ymin=0)
plt.tight_layout()
return plt
def plot_system_size(self):
"""
Plots the system size versus the number of energy calculations.
"""
self.get_system_size_plot().show()
def get_phase_diagram_plot(self):
"""
Returns a phase diagram plot, as a matplotlib plot object.
"""
# set the font to Times, rendered with Latex
plt.rc('font', **{'family': 'serif', 'serif': ['Times']})
plt.rc('text', usetex=True)
# parse the composition space endpoints
endpoints_line = self.lines[0].split()
endpoints = []
for word in endpoints_line[::-1]:
if word == 'endpoints:':
break
else:
endpoints.append(Composition(word))
if len(endpoints) < 2:
print('There must be at least 2 endpoint compositions to make a '
'phase diagram.')
quit()
# parse the compositions and total energies of all the structures
compositions = []
total_energies = []
for i in range(4, len(self.lines)):
line = self.lines[i].split()
compositions.append(Composition(line[1]))
total_energies.append(float(line[2]))
# make a list of PDEntries
pdentries = []
for i in range(len(compositions)):
pdentries.append(PDEntry(compositions[i], total_energies[i]))
# make a CompoundPhaseDiagram
compound_pd = CompoundPhaseDiagram(pdentries, endpoints)
# make a PhaseDiagramPlotter
pd_plotter = PDPlotter(compound_pd, show_unstable=100)
return pd_plotter.get_plot(label_unstable=False)
def plot_phase_diagram(self):
"""
Plots the phase diagram.
"""
self.get_phase_diagram_plot().show()
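# Illustrative usage (hedged; '/path/to/run_data' is a placeholder for the
# run_data file written by the search):
#
#   plotter = Plotter('/path/to/run_data')
#   plotter.plot_progress()
#   plotter.plot_system_size()
#   plotter.plot_phase_diagram()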
| mit | -8,962,152,829,643,050,000 | 30.70936 | 77 | 0.580239 | false | 4.033208 | false | false | false |
PmagPy/PmagPy | programs/conversion_scripts/iodp_jr6_magic.py | 1 | 2644 | #!/usr/bin/env python
"""
NAME
iodp_jr6_magic.py
DESCRIPTION
converts shipboard .jr6 format files to measurements format files
This program assumes that you have created the specimens, samples, sites and location
files using convert_2_magic.iodp_samples_csv from files downloaded from the LIMS online
repository and that all samples are in that file. (See iodp_samples_magic.py).
SYNTAX
iodp_jr6_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-ID: directory for input file if not included in -f flag
    -f FILE: specify input .jr6 file, default is all in directory
-WD: directory to output files to (default : current directory)
-F FILE: specify output measurements file, default is measurements.txt
-Fsp FILE: specify output specimens.txt file, default is specimens.txt
-lat LAT: latitude of site (also used as bounding latitude for location)
-lon LON: longitude of site (also used as bounding longitude for location)
-A: don't average replicate measurements
-v NUM: volume in cc, will be used if there is no volume in the input data (default : 12cc (rounded one inch diameter core, one inch length))
-dc FLOAT: if ARM measurements are in the file, this was the DC bias field applied
INPUT
JR6 .jr6 format file
"""
import sys
from pmagpy import convert_2_magic as convert
from pmagpy import pmag
def do_help():
return __doc__
def main():
kwargs = {}
# get command line arguments
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
kwargs['dir_path'] = sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
kwargs['input_dir_path'] = sys.argv[ind+1]
if "-h" in sys.argv:
help(__name__)
sys.exit()
if '-F' in sys.argv:
ind = sys.argv.index("-F")
kwargs['meas_file'] = sys.argv[ind+1]
if '-Fsp' in sys.argv:
ind = sys.argv.index("-Fsp")
kwargs['spec_file'] = sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index("-f")
kwargs['jr6_file'] = sys.argv[ind+1]
if "-A" in sys.argv:
kwargs['noave'] = True
if "-lat" in sys.argv:
ind = sys.argv.index("-lat")
kwargs['lat'] = sys.argv[ind+1]
if "-lon" in sys.argv:
ind = sys.argv.index("-lon")
kwargs['lon'] = sys.argv[ind+1]
if "-v" in sys.argv:
ind = sys.argv.index("-v")
kwargs['volume'] = sys.argv[ind+1]
kwargs['dc_field'] = pmag.get_named_arg('-dc', default_val=50e-6)
# do conversion
convert.iodp_jr6_lore(**kwargs)
if __name__ == '__main__':
main()
| bsd-3-clause | 5,809,550,570,063,431,000 | 32.897436 | 145 | 0.631997 | false | 3.372449 | false | false | false |
gnotaras/wordpress-add-meta-tags | make_release.py | 1 | 5082 | #
# This script is intentionally a mess. This is not meant to be used by you, folks.
#
# Copyright George Notaras
REL_FILES = [
'add-meta-tags.pot',
'add-meta-tags.php',
'amt-cli.php',
'amt-admin-panel.php',
'amt-settings.php',
'amt-template-tags.php',
'amt-utils.php',
'amt-embed.php',
'index.php',
'AUTHORS',
#'CONTRIBUTORS',
'LICENSE',
'NOTICE',
'README.rst',
'readme.txt',
# 'screenshot-1.png',
# 'screenshot-2.png',
# 'screenshot-3.png',
# 'screenshot-4.png',
'uninstall.php',
'wpml-config.xml',
]
REL_DIRS = [
'templates',
'metadata',
# 'languages',
# 'languages-contrib',
'css',
'js',
]
PLUGIN_METADATA_FILE = 'add-meta-tags.php'
POT_HEADER = """# POT (Portable Object Template)
#
# This file is part of the Add-Meta-Tags plugin for WordPress.
#
# Read more information about the Add-Meta-Tags translations at:
#
# http://www.codetrax.org/projects/wp-add-meta-tags/wiki/Translations
#
# Copyright (C) 2006-2016 George Notaras <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
# ==============================================================================
import sys
import os
import glob
import zipfile
import shutil
import subprocess
import polib
def get_name_release():
def get_data(cur_line):
return cur_line.split(':')[1].strip()
f = open(PLUGIN_METADATA_FILE)
name = ''
release = ''
for line in f:
if line.lower().startswith('plugin name:'):
name = get_data(line)
elif line.lower().startswith('version:'):
release = get_data(line)
if name and release:
break
f.close()
if not name:
raise Exception('Cannot determine plugin name')
elif not release:
raise Exception('Cannot determine plugin version')
else:
# Replace spaces in name and convert it to lowercase
name = name.replace(' ', '-')
name = name.lower()
return name, release
name, release = get_name_release()
print 'Generating POT file...'
# Translation
pot_domain = os.path.splitext(PLUGIN_METADATA_FILE)[0]
# Generate POT file
args = ['xgettext', '--default-domain=%s' % pot_domain, '--output=%s.pot' % pot_domain, '--language=PHP', '--from-code=UTF-8', '--keyword=__', '--keyword=_e', '--no-wrap', '--package-name=%s' % pot_domain, '--package-version=%s' % release, '--copyright-holder', 'George Notaras <[email protected]>']
# Add php files as arguments
for rf in REL_FILES:
if rf.endswith('.php'):
args.append(rf)
for rf in os.listdir('metadata'):
if rf.endswith('.php'):
args.append( os.path.join( 'metadata', rf ) )
for rf in os.listdir('templates'):
if rf.endswith('.php'):
args.append( os.path.join( 'templates', rf ) )
print (' ').join(args)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
# Replace POT Header
f = open('%s.pot' % pot_domain, 'r')
pot_lines = f.readlines()
f.close()
f = open('%s.pot' % pot_domain, 'w')
f.write(POT_HEADER)
for n, line in enumerate(pot_lines):
if n < 4:
continue
f.write(line)
f.close()
print 'Complete'
# Compile language .po files to .mo
print 'Compiling PO files to MO...'
for po_file in os.listdir('languages'):
if not po_file.endswith('.po'):
continue
po_path = os.path.join('languages', po_file)
print 'Converting', po_path
po = polib.pofile(po_path, encoding='utf-8')
mo_path = po_path[:-3] + '.mo'
po.save_as_mofile(mo_path)
print 'Complete'
print
print 'Creating distribution package...'
# Create release dir and move release files inside it
os.mkdir(name)
# Copy files
for p_file in REL_FILES:
shutil.copy(p_file, os.path.join(name, p_file))
# Copy dirs
for p_dir in REL_DIRS:
shutil.copytree(p_dir, os.path.join(name, p_dir))
# Create distribution package
d_package_path = '%s-%s.zip' % (name, release)
d_package = zipfile.ZipFile(d_package_path, 'w', zipfile.ZIP_DEFLATED)
# Append root files
for p_file in REL_FILES:
d_package.write(os.path.join(name, p_file))
# Append language directory
for p_dir in REL_DIRS:
d_package.write(os.path.join(name, p_dir))
# Append files in that directory
for p_file in os.listdir(os.path.join(name, p_dir)):
d_package.write(os.path.join(name, p_dir, p_file))
d_package.testzip()
d_package.comment = 'Official packaging by CodeTRAX'
d_package.printdir()
d_package.close()
# Remove the release dir
shutil.rmtree(name)
print 'Complete'
print
| apache-2.0 | -8,872,548,991,531,218,000 | 25.061538 | 298 | 0.64148 | false | 3.196226 | false | false | false |
cinnamoncoin/eloipool_Blakecoin | blake8.py | 2 | 19109 |
intro = """
blake.py
version 4
BLAKE is a SHA3 round-3 finalist designed and submitted by
Jean-Philippe Aumasson et al.
At the core of BLAKE is a ChaCha-like mixer, very similar
to that found in the stream cipher, ChaCha8. Besides being
a very good mixer, ChaCha is fast.
References:
http://www.131002.net/blake/
http://csrc.nist.gov/groups/ST/hash/sha-3/index.html
http://en.wikipedia.org/wiki/BLAKE_(hash_function)
This implementation assumes all data is in increments of
whole bytes. (The formal definition of BLAKE allows for
hashing individual bits.) Note too that this implementation
does include the round-3 tweaks where the number of rounds
was increased to 14/16 from 10/14.
This version can be imported into both Python2 and Python3
programs.
Here are some comparative run times for different versions
of Python:
64-bit:
2.6 6.28s
2.7 6.34s
3.2 7.62s
pypy (2.7) 2.08s
32-bit:
2.7 13.65s
3.2 12.57s
Another test on a 2.0GHz Core 2 Duo of 10,000 iterations of
BLAKE-256 on a short message produced a time of 5.7 seconds.
    Not bad, but if raw speed is what you want, look to the C
    version.  It is 40x faster and did the same thing in
0.13 seconds.
Copyright (c) 2009-2012 by Larry Bugbee, Kent, WA
ALL RIGHTS RESERVED.
blake.py IS EXPERIMENTAL SOFTWARE FOR EDUCATIONAL
PURPOSES ONLY. IT IS MADE AVAILABLE "AS-IS" WITHOUT
WARRANTY OR GUARANTEE OF ANY KIND. USE SIGNIFIES
ACCEPTANCE OF ALL RISK.
To make your learning and experimentation less cumbersome,
blake.py is free for any use.
Enjoy,
Larry Bugbee
March 2011
rev May 2011 - fixed Python version check (tx JP)
rev Apr 2012 - fixed an out-of-order bit set in final()
- moved self-test to a separate test pgm
- this now works with Python2 and Python3
"""
import struct
try:
import psyco # works on some 32-bit Python2 versions only
have_psyco = True
print('psyco enabled')
except:
have_psyco = False
#---------------------------------------------------------------
class BLAKE(object):
# - - - - - - - - - - - - - - - - - - - - - - - - - - -
# initial values, constants and padding
# IVx for BLAKE-x
IV64 = [
0x6A09E667F3BCC908, 0xBB67AE8584CAA73B,
0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1,
0x510E527FADE682D1, 0x9B05688C2B3E6C1F,
0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179,
]
IV48 = [
0xCBBB9D5DC1059ED8, 0x629A292A367CD507,
0x9159015A3070DD17, 0x152FECD8F70E5939,
0x67332667FFC00B31, 0x8EB44A8768581511,
0xDB0C2E0D64F98FA7, 0x47B5481DBEFA4FA4,
]
# note: the values here are the same as the high-order
# half-words of IV64
IV32 = [
0x6A09E667, 0xBB67AE85,
0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C,
0x1F83D9AB, 0x5BE0CD19,
]
# note: the values here are the same as the low-order
# half-words of IV48
IV28 = [
0xC1059ED8, 0x367CD507,
0x3070DD17, 0xF70E5939,
0xFFC00B31, 0x68581511,
0x64F98FA7, 0xBEFA4FA4,
]
# constants for BLAKE-64 and BLAKE-48
C64 = [
0x243F6A8885A308D3, 0x13198A2E03707344,
0xA4093822299F31D0, 0x082EFA98EC4E6C89,
0x452821E638D01377, 0xBE5466CF34E90C6C,
0xC0AC29B7C97C50DD, 0x3F84D5B5B5470917,
0x9216D5D98979FB1B, 0xD1310BA698DFB5AC,
0x2FFD72DBD01ADFB7, 0xB8E1AFED6A267E96,
0xBA7C9045F12C7F99, 0x24A19947B3916CF7,
0x0801F2E2858EFC16, 0x636920D871574E69,
]
# constants for BLAKE-32 and BLAKE-28
# note: concatenate and the values are the same as the values
# for the 1st half of C64
C32 = [
0x243F6A88, 0x85A308D3,
0x13198A2E, 0x03707344,
0xA4093822, 0x299F31D0,
0x082EFA98, 0xEC4E6C89,
0x452821E6, 0x38D01377,
0xBE5466CF, 0x34E90C6C,
0xC0AC29B7, 0xC97C50DD,
0x3F84D5B5, 0xB5470917,
]
    # the 10 permutations of {0,...,15}
SIGMA = [
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15],
[14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3],
[11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4],
[ 7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8],
[ 9, 0, 5, 7, 2, 4,10,15,14, 1,11,12, 6, 8, 3,13],
[ 2,12, 6,10, 0,11, 8, 3, 4,13, 7, 5,15,14, 1, 9],
[12, 5, 1,15,14,13, 4,10, 0, 7, 6, 3, 9, 2, 8,11],
[13,11, 7,14,12, 1, 3, 9, 5, 0,15, 4, 8, 6, 2,10],
[ 6,15,14, 9,11, 3, 0, 8,12, 2,13, 7, 1, 4,10, 5],
[10, 2, 8, 4, 7, 6, 1, 5,15,11, 9,14, 3,12,13, 0],
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15],
[14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3],
[11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4],
[ 7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8],
[ 9, 0, 5, 7, 2, 4,10,15,14, 1,11,12, 6, 8, 3,13],
[ 2,12, 6,10, 0,11, 8, 3, 4,13, 7, 5,15,14, 1, 9],
[12, 5, 1,15,14,13, 4,10, 0, 7, 6, 3, 9, 2, 8,11],
[13,11, 7,14,12, 1, 3, 9, 5, 0,15, 4, 8, 6, 2,10],
[ 6,15,14, 9,11, 3, 0, 8,12, 2,13, 7, 1, 4,10, 5],
[10, 2, 8, 4, 7, 6, 1, 5,15,11, 9,14, 3,12,13, 0],
]
MASK32BITS = 0xFFFFFFFF
MASK64BITS = 0xFFFFFFFFFFFFFFFF
# - - - - - - - - - - - - - - - - - - - - - - - - - - -
def __init__(self, hashbitlen):
"""
load the hashSate structure (copy hashbitlen...)
hashbitlen: length of the hash output
"""
if hashbitlen not in [224, 256, 384, 512]:
raise Exception('hash length not 224, 256, 384 or 512')
self.hashbitlen = hashbitlen
self.h = [0]*8 # current chain value (initialized to the IV)
self.t = 0 # number of *BITS* hashed so far
self.cache = b'' # cached leftover data not yet compressed
self.salt = [0]*4 # salt (null by default)
self.init = 1 # set to 2 by update and 3 by final
self.nullt = 0 # Boolean value for special case \ell_i=0
# The algorithm is the same for both the 32- and 64- versions
# of BLAKE. The difference is in word size (4 vs 8 bytes),
# blocksize (64 vs 128 bytes), number of rounds (14 vs 16)
# and a few very specific constants.
if (hashbitlen == 224) or (hashbitlen == 256):
# setup for 32-bit words and 64-bit block
self.byte2int = self._fourByte2int
self.int2byte = self._int2fourByte
self.MASK = self.MASK32BITS
self.WORDBYTES = 4
self.WORDBITS = 32
self.BLKBYTES = 64
self.BLKBITS = 512
# self.ROUNDS = 14 # was 10 before round 3
self.ROUNDS = 8 # BLAKE 8 for blakecoin
self.cxx = self.C32
self.rot1 = 16 # num bits to shift in G
self.rot2 = 12 # num bits to shift in G
self.rot3 = 8 # num bits to shift in G
self.rot4 = 7 # num bits to shift in G
self.mul = 0 # for 32-bit words, 32<<self.mul where self.mul = 0
# 224- and 256-bit versions (32-bit words)
if hashbitlen == 224:
self.h = self.IV28[:]
else:
self.h = self.IV32[:]
elif (hashbitlen == 384) or (hashbitlen == 512):
# setup for 64-bit words and 128-bit block
self.byte2int = self._eightByte2int
self.int2byte = self._int2eightByte
self.MASK = self.MASK64BITS
self.WORDBYTES = 8
self.WORDBITS = 64
self.BLKBYTES = 128
self.BLKBITS = 1024
self.ROUNDS = 16 # was 14 before round 3
self.cxx = self.C64
self.rot1 = 32 # num bits to shift in G
self.rot2 = 25 # num bits to shift in G
self.rot3 = 16 # num bits to shift in G
self.rot4 = 11 # num bits to shift in G
self.mul = 1 # for 64-bit words, 32<<self.mul where self.mul = 1
# 384- and 512-bit versions (64-bit words)
if hashbitlen == 384:
self.h = self.IV48[:]
else:
self.h = self.IV64[:]
# - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _compress(self, block):
byte2int = self.byte2int
mul = self.mul # de-reference these for ...speed? ;-)
cxx = self.cxx
rot1 = self.rot1
rot2 = self.rot2
rot3 = self.rot3
rot4 = self.rot4
MASK = self.MASK
WORDBITS = self.WORDBITS
SIGMA = self.SIGMA
# get message (<<2 is the same as *4 but faster)
m = [byte2int(block[i<<2<<mul:(i<<2<<mul)+(4<<mul)]) for i in range(16)]
# initialization
v = [0]*16
v[ 0: 8] = [self.h[i] for i in range(8)]
v[ 8:16] = [self.cxx[i] for i in range(8)]
v[ 8:12] = [v[8+i] ^ self.salt[i] for i in range(4)]
if self.nullt == 0: # (i>>1 is the same as i/2 but faster)
v[12] = v[12] ^ (self.t & MASK)
v[13] = v[13] ^ (self.t & MASK)
v[14] = v[14] ^ (self.t >> self.WORDBITS)
v[15] = v[15] ^ (self.t >> self.WORDBITS)
# - - - - - - - - - - - - - - - - -
# ready? let's ChaCha!!!
def G(a, b, c, d, i):
va = v[a] # it's faster to deref and reref later
vb = v[b]
vc = v[c]
vd = v[d]
sri = SIGMA[round][i]
sri1 = SIGMA[round][i+1]
va = ((va + vb) + (m[sri] ^ cxx[sri1]) ) & MASK
x = vd ^ va
vd = (x >> rot1) | ((x << (WORDBITS-rot1)) & MASK)
vc = (vc + vd) & MASK
x = vb ^ vc
vb = (x >> rot2) | ((x << (WORDBITS-rot2)) & MASK)
va = ((va + vb) + (m[sri1] ^ cxx[sri]) ) & MASK
x = vd ^ va
vd = (x >> rot3) | ((x << (WORDBITS-rot3)) & MASK)
vc = (vc + vd) & MASK
x = vb ^ vc
vb = (x >> rot4) | ((x << (WORDBITS-rot4)) & MASK)
v[a] = va
v[b] = vb
v[c] = vc
v[d] = vd
for round in range(self.ROUNDS):
# column step
G( 0, 4, 8,12, 0)
G( 1, 5, 9,13, 2)
G( 2, 6,10,14, 4)
G( 3, 7,11,15, 6)
# diagonal step
G( 0, 5,10,15, 8)
G( 1, 6,11,12,10)
G( 2, 7, 8,13,12)
G( 3, 4, 9,14,14)
# - - - - - - - - - - - - - - - - -
# save current hash value (use i&0x3 to get 0,1,2,3,0,1,2,3)
self.h = [self.h[i]^v[i]^v[i+8]^self.salt[i&0x3]
for i in range(8)]
# print 'self.h', [num2hex(h) for h in self.h]
# - - - - - - - - - - - - - - - - - - - - - - - - - - -
def addsalt(self, salt):
""" adds a salt to the hash function (OPTIONAL)
should be called AFTER Init, and BEFORE update
salt: a bytestring, length determined by hashbitlen.
if not of sufficient length, the bytestring
will be assumed to be a big endian number and
prefixed with an appropriate number of null
bytes, and if too large, only the low order
bytes will be used.
if hashbitlen=224 or 256, then salt will be 16 bytes
if hashbitlen=384 or 512, then salt will be 32 bytes
"""
# fail if addsalt() was not called at the right time
if self.init != 1:
raise Exception('addsalt() not called after init() and before update()')
# salt size is to be 4x word size
saltsize = self.WORDBYTES * 4
# if too short, prefix with null bytes. if too long,
# truncate high order bytes
if len(salt) < saltsize:
            salt = (b'\x00'*(saltsize-len(salt)) + salt)
else:
salt = salt[-saltsize:]
# prep the salt array
self.salt[0] = self.byte2int(salt[ : 4<<self.mul])
self.salt[1] = self.byte2int(salt[ 4<<self.mul: 8<<self.mul])
self.salt[2] = self.byte2int(salt[ 8<<self.mul:12<<self.mul])
self.salt[3] = self.byte2int(salt[12<<self.mul: ])
# - - - - - - - - - - - - - - - - - - - - - - - - - - -
def update(self, data):
""" update the state with new data, storing excess data
as necessary. may be called multiple times and if a
call sends less than a full block in size, the leftover
is cached and will be consumed in the next call
data: data to be hashed (bytestring)
"""
self.init = 2
BLKBYTES = self.BLKBYTES # de-referenced for improved readability
BLKBITS = self.BLKBITS
datalen = len(data)
if not datalen: return
left = len(self.cache)
fill = BLKBYTES - left
# if any cached data and any added new data will fill a
# full block, fill and compress
if left and datalen >= fill:
self.cache = self.cache + data[:fill]
self.t += BLKBITS # update counter
self._compress(self.cache)
self.cache = b''
data = data[fill:]
datalen -= fill
# compress new data until not enough for a full block
while datalen >= BLKBYTES:
self.t += BLKBITS # update counter
self._compress(data[:BLKBYTES])
data = data[BLKBYTES:]
datalen -= BLKBYTES
# cache all leftover bytes until next call to update()
if datalen > 0:
self.cache = self.cache + data[:datalen]
# - - - - - - - - - - - - - - - - - - - - - - - - - - -
def final(self, data=''):
""" finalize the hash -- pad and hash remaining data
returns hashval, the digest
"""
ZZ = b'\x00'
ZO = b'\x01'
OZ = b'\x80'
OO = b'\x81'
PADDING = OZ + ZZ*128 # pre-formatted padding data
if data:
self.update(data)
# copy nb. bits hash in total as a 64-bit BE word
# copy nb. bits hash in total as a 128-bit BE word
tt = self.t + (len(self.cache) << 3)
if self.BLKBYTES == 64:
msglen = self._int2eightByte(tt)
else:
low = tt & self.MASK
high = tt >> self.WORDBITS
msglen = self._int2eightByte(high) + self._int2eightByte(low)
# size of block without the words at the end that count
# the number of bits, 55 or 111.
# Note: (((self.WORDBITS/8)*2)+1) equals ((self.WORDBITS>>2)+1)
sizewithout = self.BLKBYTES - ((self.WORDBITS>>2)+1)
if len(self.cache) == sizewithout:
# special case of one padding byte
self.t -= 8
if self.hashbitlen in [224, 384]:
self.update(OZ)
else:
self.update(OO)
else:
if len(self.cache) < sizewithout:
# enough space to fill the block
# use t=0 if no remaining data
if len(self.cache) == 0:
self.nullt=1
self.t -= (sizewithout - len(self.cache)) << 3
self.update(PADDING[:sizewithout - len(self.cache)])
else:
# NOT enough space, need 2 compressions
# ...add marker, pad with nulls and compress
self.t -= (self.BLKBYTES - len(self.cache)) << 3
self.update(PADDING[:self.BLKBYTES - len(self.cache)])
# ...now pad w/nulls leaving space for marker & bit count
self.t -= (sizewithout+1) << 3
self.update(PADDING[1:sizewithout+1]) # pad with zeroes
self.nullt = 1 # raise flag to set t=0 at the next _compress
# append a marker byte
if self.hashbitlen in [224, 384]:
self.update(ZZ)
else:
self.update(ZO)
self.t -= 8
# append the number of bits (long long)
self.t -= self.BLKBYTES
self.update(msglen)
hashval = []
if self.BLKBYTES == 64:
for h in self.h:
hashval.append(self._int2fourByte(h))
else:
for h in self.h:
hashval.append(self._int2eightByte(h))
return b''.join(hashval)[:self.hashbitlen >> 3]
digest = final # may use digest() as a synonym for final()
def midstate(self, data=''):
if data:
self.update(data)
hashval = []
if self.BLKBYTES == 64:
for h in self.h:
hashval.append(self._int2fourByte(h))
else:
for h in self.h:
hashval.append(self._int2eightByte(h))
return b''.join(hashval)[:self.hashbitlen >> 3]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# utility functions
def _fourByte2int(self, bytestr): # see also long2byt() below
""" convert a 4-byte string to an int (long) """
return struct.unpack('!L', bytestr)[0]
def _eightByte2int(self, bytestr):
""" convert a 8-byte string to an int (long long) """
return struct.unpack('!Q', bytestr)[0]
def _int2fourByte(self, x): # see also long2byt() below
""" convert a number to a 4-byte string, high order
truncation possible (in Python x could be a BIGNUM)
"""
return struct.pack('!L', x)
def _int2eightByte(self, x):
""" convert a number to a 8-byte string, high order
truncation possible (in Python x could be a BIGNUM)
"""
return struct.pack('!Q', x)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if have_psyco:
        # class-body scope: bind the plain name (self is not defined here)
        _compress = psyco.proxy(_compress)
#---------------------------------------------------------------
#---------------------------------------------------------------
#---------------------------------------------------------------
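# Illustrative use of the BLAKE class above (hedged sketch, not from the
# original file). Note that hashbitlen 224/256 selects the 8-round blakecoin
# variant configured above, so digests will not match standard 14-round
# BLAKE-256 test vectors.
#
#   hasher = BLAKE(256)
#   hasher.update(b'an example message')
#   digest = hasher.final()      # 32-byte digest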
| mit | 402,631,691,257,087,700 | 36.104854 | 88 | 0.487257 | false | 3.194951 | true | false | false |
DedMemez/ODS-August-2017 | abc.py | 1 | 3785 | # Decompiled source for the 'abc' module
import types
from _weakrefset import WeakSet
class _C:
pass
_InstanceType = type(_C())
def abstractmethod(funcobj):
funcobj.__isabstractmethod__ = True
return funcobj
class abstractproperty(property):
__isabstractmethod__ = True
class ABCMeta(type):
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
abstracts = set((name for name, value in namespace.items() if getattr(value, '__isabstractmethod__', False)))
for base in bases:
for name in getattr(base, '__abstractmethods__', set()):
value = getattr(cls, name, None)
if getattr(value, '__isabstractmethod__', False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
if not isinstance(subclass, (type, types.ClassType)):
raise TypeError('Can only register classes')
if issubclass(subclass, cls):
return
if issubclass(cls, subclass):
raise RuntimeError('Refusing to create an inheritance cycle')
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1
def _dump_registry(cls, file = None):
print >> file, 'Class: %s.%s' % (cls.__module__, cls.__name__)
print >> file, 'Inv.counter: %s' % ABCMeta._abc_invalidation_counter
for name in sorted(cls.__dict__.keys()):
if name.startswith('_abc_'):
value = getattr(cls, name)
print >> file, '%s: %r' % (name, value)
def __instancecheck__(cls, instance):
subclass = getattr(instance, '__class__', None)
if subclass is not None and subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is _InstanceType:
subtype = subclass
if subtype is subclass or subclass is None:
if cls._abc_negative_cache_version == ABCMeta._abc_invalidation_counter and subtype in cls._abc_negative_cache:
return False
return cls.__subclasscheck__(subtype)
else:
return cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype)
def __subclasscheck__(cls, subclass):
if subclass in cls._abc_cache:
return True
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
cls._abc_negative_cache.add(subclass)
return False | apache-2.0 | -5,928,648,413,365,189,000 | 35.87 | 123 | 0.57041 | false | 4.401163 | false | false | false |
cstipkovic/spidermonkey-research | testing/marionette/harness/marionette/tests/unit/test_addons.py | 1 | 1985 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import unittest
from marionette import MarionetteTestCase
from marionette_driver.addons import Addons, AddonInstallException
here = os.path.abspath(os.path.dirname(__file__))
class TestAddons(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
self.addons = Addons(self.marionette)
@property
def all_addon_ids(self):
with self.marionette.using_context('chrome'):
addons = self.marionette.execute_async_script("""
Components.utils.import("resource://gre/modules/AddonManager.jsm");
AddonManager.getAllAddons(function(addons){
let ids = addons.map(function(x) {
return x.id;
});
marionetteScriptFinished(ids);
});
""")
return addons
def test_install_and_remove_temporary_unsigned_addon(self):
addon_path = os.path.join(here, 'mn-restartless-unsigned.xpi')
addon_id = self.addons.install(addon_path, temp=True)
self.assertIn(addon_id, self.all_addon_ids)
self.addons.uninstall(addon_id)
self.assertNotIn(addon_id, self.all_addon_ids)
def test_install_unsigned_addon(self):
addon_path = os.path.join(here, 'mn-restartless-unsigned.xpi')
with self.assertRaises(AddonInstallException):
self.addons.install(addon_path)
@unittest.skip("need to get the test extension signed")
def test_install_and_remove_signed_addon(self):
addon_path = os.path.join(here, 'mn-restartless-signed.xpi')
addon_id = self.addons.install(addon_path)
self.assertIn(addon_id, self.all_addon_ids)
self.addons.uninstall(addon_id)
self.assertNotIn(addon_id, self.all_addon_ids)
| mpl-2.0 | 3,420,210,611,468,736,500 | 33.224138 | 81 | 0.654408 | false | 3.615665 | true | false | false |
mr-karan/coala | coalib/bearlib/languages/documentation/DocumentationComment.py | 4 | 5320 | from collections import namedtuple
from coala_utils.decorators import generate_eq, generate_repr
@generate_repr()
@generate_eq("documentation", "language", "docstyle",
"indent", "marker", "range")
class DocumentationComment:
"""
The DocumentationComment holds information about a documentation comment
inside source-code, like position etc.
"""
Parameter = namedtuple('Parameter', 'name, desc')
ReturnValue = namedtuple('ReturnValue', 'desc')
Description = namedtuple('Description', 'desc')
def __init__(self, documentation, language,
docstyle, indent, marker, range):
"""
Instantiates a new DocumentationComment.
:param documentation: The documentation text.
:param language: The language of the documention.
:param docstyle: The docstyle used in the documentation.
:param indent: The string of indentation used in front
of the first marker of the documentation.
:param marker: The three-element tuple with marker strings,
that identified this documentation comment.
:param range: The position range of type TextRange.
"""
self.documentation = documentation
self.language = language.lower()
self.docstyle = docstyle.lower()
self.indent = indent
self.marker = marker
self.range = range
def __str__(self):
return self.documentation
def parse(self):
"""
Parses documentation independent of language and docstyle.
:return:
The list of all the parsed sections of the documentation. Every
section is a namedtuple of either ``Description`` or ``Parameter``
or ``ReturnValue``.
:raises NotImplementedError:
When no parsing method is present for the given language and
docstyle.
"""
if self.language == "python" and self.docstyle == "default":
return self._parse_documentation_with_symbols(
(":param ", ": "), ":return: ")
elif self.language == "python" and self.docstyle == "doxygen":
return self._parse_documentation_with_symbols(
("@param ", " "), "@return ")
elif self.language == "java" and self.docstyle == "default":
return self._parse_documentation_with_symbols(
("@param ", " "), "@return ")
else:
raise NotImplementedError(
"Documentation parsing for {0.language!r} in {0.docstyle!r}"
" has not been implemented yet".format(self))
def _parse_documentation_with_symbols(self, param_identifiers,
return_identifiers):
"""
Parses documentation based on parameter and return symbols.
:param param_identifiers:
A tuple of two strings with which a parameter starts and ends.
:param return_identifiers:
The string with which a return description starts.
:return:
The list of all the parsed sections of the documentation. Every
section is a namedtuple of either ``Description`` or ``Parameter``
or ``ReturnValue``.
"""
lines = self.documentation.splitlines(keepends=True)
parse_mode = self.Description
cur_param = ""
desc = ""
parsed = []
for line in lines:
stripped_line = line.strip()
if stripped_line.startswith(param_identifiers[0]):
parse_mode = self.Parameter
param_offset = line.find(
param_identifiers[0]) + len(param_identifiers[0])
splitted = line[param_offset:].split(param_identifiers[1], 1)
cur_param = splitted[0].strip()
# For cases where the param description is not on the
# same line, but on subsequent lines.
try:
param_desc = splitted[1]
except IndexError:
param_desc = ""
parsed.append(self.Parameter(name=cur_param, desc=param_desc))
elif stripped_line.startswith(return_identifiers):
parse_mode = self.ReturnValue
return_offset = line.find(
return_identifiers) + len(return_identifiers)
retval_desc = line[return_offset:]
parsed.append(self.ReturnValue(desc=retval_desc))
elif parse_mode == self.ReturnValue:
retval_desc += line
parsed.pop()
parsed.append(self.ReturnValue(desc=retval_desc))
elif parse_mode == self.Parameter:
param_desc += line
parsed.pop()
parsed.append(self.Parameter(name=cur_param, desc=param_desc))
else:
desc += line
# This is inside a try-except for cases where the list
# is empty and has nothing to pop.
try:
parsed.pop()
except IndexError:
pass
parsed.append(self.Description(desc=desc))
return parsed
| agpl-3.0 | -4,701,396,301,550,918,000 | 38.117647 | 78 | 0.564286 | false | 5.028355 | false | false | false |
DavidAndreev/indico | migrations/versions/201502111317_233928da84b2_create_video_conference_rooms.py | 4 | 2113 | """Create videoconference rooms
Revision ID: 233928da84b2
Revises: 50c2b5ee2726
Create Date: 2015-02-11 13:17:44.365589
"""
import sqlalchemy as sa
from alembic import op
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy import UTCDateTime
from indico.modules.vc.models.vc_rooms import VCRoomLinkType, VCRoomStatus
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '233928da84b2'
down_revision = '5583f647dff5'
def upgrade():
op.create_table('vc_rooms',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('type', sa.String(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('status', PyIntEnum(VCRoomStatus), nullable=False),
sa.Column('created_by_id', sa.Integer(), nullable=False, index=True),
sa.Column('created_dt', UTCDateTime, nullable=False),
sa.Column('modified_dt', UTCDateTime, nullable=True),
sa.Column('data', postgresql.JSON(), nullable=False),
sa.PrimaryKeyConstraint('id'),
schema='events')
op.create_table('vc_room_events',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('event_id', sa.Integer(), autoincrement=False, nullable=False, index=True),
sa.Column('vc_room_id', sa.Integer(), nullable=False, index=True),
sa.Column('link_type', PyIntEnum(VCRoomLinkType), nullable=False),
sa.Column('link_id', sa.String(), nullable=True),
sa.Column('show', sa.Boolean(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=False),
sa.ForeignKeyConstraint(['vc_room_id'], ['events.vc_rooms.id']),
sa.PrimaryKeyConstraint('id'),
schema='events')
def downgrade():
op.drop_table('vc_room_events', schema='events')
op.drop_table('vc_rooms', schema='events')
| gpl-3.0 | 6,583,649,150,840,279,000 | 43.020833 | 105 | 0.599148 | false | 3.90573 | false | false | false |
CardiacAtlasProject/CAPServer2.0 | dbase/utils/test_connect.py | 1 | 1654 | """Test connection to a MySQL server
Usage:
test_connect.py [options] <host> <user>
test_connect.py -h
Arguments:
host MySQL server IP address
user Username to connect with
Options:
-h, --help Show this screen
-d, --debug Show some debug information
-p, --port MySQL port. Default is 3306.
--password=<password> User password.
Author: Avan Suinesiaputra - University of Auckland (2017)
"""
# Docopt is a library for parsing command line arguments
import docopt
import getpass
import mysql.connector
if __name__ == '__main__':
try:
# Parse arguments, use file docstring as a parameter definition
arguments = docopt.docopt(__doc__)
# Default values
if not arguments['--port']:
arguments['--port'] = 3306
# Check password
if arguments['--password'] is None:
arguments['--password'] = getpass.getpass('Password: ')
# print arguments for debug
if arguments['--debug']:
print arguments
# Handle invalid options
except docopt.DocoptExit as e:
print e.message
exit()
# everything goes fine
# let's go!
print 'Connecting mysql://' + arguments['<host>'] + ':' + str(arguments['--port']) + ' ...'
try:
cnx = mysql.connector.connect(user=arguments['<user>'],
host=arguments['<host>'],
port=arguments['--port'],
password=arguments['--password'])
except mysql.connector.Error as err:
print(err)
else:
print "SUCCESS"
cnx.close()
| apache-2.0 | -1,585,207,934,581,546,200 | 25.253968 | 91 | 0.573156 | false | 4.230179 | false | false | false |
msmolens/VTK | Parallel/Core/Testing/Python/TestPolyDataPieces.py | 12 | 2945 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
math = vtk.vtkMath()
math.RandomSeed(22)
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(32)
sphere.SetThetaResolution(32)
extract = vtk.vtkExtractPolyDataPiece()
extract.SetInputConnection(sphere.GetOutputPort())
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(extract.GetOutputPort())
ps = vtk.vtkPieceScalars()
ps.SetInputConnection(normals.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(ps.GetOutputPort())
mapper.SetNumberOfPieces(2)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
sphere2 = vtk.vtkSphereSource()
sphere2.SetPhiResolution(32)
sphere2.SetThetaResolution(32)
extract2 = vtk.vtkExtractPolyDataPiece()
extract2.SetInputConnection(sphere2.GetOutputPort())
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(extract2.GetOutputPort())
mapper2.SetNumberOfPieces(2)
mapper2.SetPiece(1)
mapper2.SetScalarRange(0, 4)
mapper2.SetScalarModeToUseCellFieldData()
mapper2.SetColorModeToMapScalars()
mapper2.ColorByArrayComponent(vtk.vtkDataSetAttributes.GhostArrayName(), 0)
mapper2.SetGhostLevel(4)
# check the pipeline size
extract2.UpdateInformation()
psize = vtk.vtkPipelineSize()
if (psize.GetEstimatedSize(extract2, 0, 0) > 100):
print ("ERROR: Pipeline Size increased")
pass
if (psize.GetNumberOfSubPieces(10, mapper2) != 1):
print ("ERROR: Number of sub pieces changed",
psize.GetNumberOfSubPieces(10, mapper2))
pass
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
actor2.SetPosition(1.5, 0, 0)
sphere3 = vtk.vtkSphereSource()
sphere3.SetPhiResolution(32)
sphere3.SetThetaResolution(32)
extract3 = vtk.vtkExtractPolyDataPiece()
extract3.SetInputConnection(sphere3.GetOutputPort())
ps3 = vtk.vtkPieceScalars()
ps3.SetInputConnection(extract3.GetOutputPort())
mapper3 = vtk.vtkPolyDataMapper()
mapper3.SetInputConnection(ps3.GetOutputPort())
mapper3.SetNumberOfSubPieces(8)
mapper3.SetScalarRange(0, 8)
actor3 = vtk.vtkActor()
actor3.SetMapper(mapper3)
actor3.SetPosition(0, -1.5, 0)
sphere4 = vtk.vtkSphereSource()
sphere4.SetPhiResolution(32)
sphere4.SetThetaResolution(32)
extract4 = vtk.vtkExtractPolyDataPiece()
extract4.SetInputConnection(sphere4.GetOutputPort())
ps4 = vtk.vtkPieceScalars()
ps4.RandomModeOn()
ps4.SetScalarModeToCellData()
ps4.SetInputConnection(extract4.GetOutputPort())
mapper4 = vtk.vtkPolyDataMapper()
mapper4.SetInputConnection(ps4.GetOutputPort())
mapper4.SetNumberOfSubPieces(8)
mapper4.SetScalarRange(0, 8)
actor4 = vtk.vtkActor()
actor4.SetMapper(mapper4)
actor4.SetPosition(1.5, -1.5, 0)
ren = vtk.vtkRenderer()
ren.AddActor(actor)
ren.AddActor(actor2)
ren.AddActor(actor3)
ren.AddActor(actor4)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
#iren.Start()
| bsd-3-clause | 5,129,533,428,117,693,000 | 24.833333 | 75 | 0.798981 | false | 3.036082 | false | true | false |
ticketea/forseti | forseti/cli.py | 1 | 2818 | #!/usr/bin/env python
"""Forseti is a tool to manage AWS autoscaling groups.
Usage:
{% for doc in command_docs -%}
forseti {{ doc }}
{% endfor -%}
forseti (-h | --help)
forseti --version
Options:
{% for doc in command_options -%}
{{ doc }}
{% endfor -%}
-h --help Show this screen.
--version Show version.
"""
import sys
from docopt import docopt
from forseti.metadata import __version__ as forseti_version
from forseti.configuration import ForsetiConfiguration
from forseti.commands.base import get_all_commands
from forseti.commands.commands import CleanUpAutoscaleConfigurationsCommand
from jinja2 import Template
import os.path
def get_configuration_file_path():
return os.path.abspath(os.path.expanduser('~/.forseti/config.json'))
def read_configuration_file():
config_path = get_configuration_file_path()
if not os.path.exists(config_path):
raise ValueError("Configuration file does not exist at %r" % config_path)
try:
return ForsetiConfiguration(config_path)
except ValueError as exception:
print("Invalid JSON configuration file {}\n".format(config_path))
raise exception
def generate_dosctring():
commands_documentation = []
options_documentation = []
commands = get_all_commands()
for command_class in commands:
command = command_class()
command_doc = command.cli_command_doc()
if command_doc:
commands_documentation.append(command_doc)
comand_options_docs = command.cli_command_options_doc()
if comand_options_docs:
options_documentation.append(comand_options_docs)
return Template(__doc__).render(
command_docs=commands_documentation,
command_options=options_documentation,
app_name=sys.argv[0]
)
def commands_arguments_mapper():
mapper = []
commands = get_all_commands()
for command_class in commands:
command = command_class()
mapper.append(
(command.cli_command_name(), command)
)
return mapper
def should_run_cleanup(forseti_command):
return forseti_command.cli_command_name() == "deploy"
def main():
arguments = docopt(generate_dosctring())
if arguments['--version']:
print("Forseti {}".format(forseti_version))
return
configuration = read_configuration_file()
for cli_command, forseti_command in commands_arguments_mapper():
if arguments[cli_command]:
forseti_command.run(configuration, arguments)
if should_run_cleanup(forseti_command):
forseit_cleanup_command = CleanUpAutoscaleConfigurationsCommand()
forseit_cleanup_command.run(configuration, arguments)
if __name__ == '__main__':
main()
| isc | 9,020,402,200,622,750,000 | 27.464646 | 81 | 0.658978 | false | 3.952314 | true | false | false |
google/certificate-transparency | python/ct/crypto/asn1/type_test_base.py | 35 | 4516 | import unittest
from ct.crypto import error
class TypeTestBase(unittest.TestCase):
# Test class for each concrete type should fill this in.
asn1_type = None
# Immutable types support hashing.
immutable = True
# Repeated types support lookup and assignment by index.
repeated = True
# Keyed types support lookup and assignment by key.
keyed = True
# A tuple of initializer tuples; components in each tuple should yield
# equal objects. The first component in each tuple should be the canonical
# value (returned by .value).
initializers = None
# A tuple of (bad_initializer, exception_raised) pairs.
bad_initializers = None
# A tuple of (value, hex_der_encoding) pairs.
# Note: test vectors should include the complete encoding (including tag
# and length). This is so we can lift test vectors directly from the ASN.1
# spec and test that we recognize the correct tag for each type.
# However test vectors for invalid encodings should focus on type-specific
# corner cases. It's not necessary for each type to verify that invalid
# tags and lengths are rejected: this is covered in separate tests.
encode_test_vectors = None
# A tuple of of serialized, hex-encoded values.
bad_encodings = None
# A tuple of (value, hex_encoding) pairs that can only be decoded
# in non-strict mode.
bad_strict_encodings = None
def test_create(self):
for initializer_set in self.initializers:
value = initializer_set[0]
# The canonical initializer.
for i in initializer_set:
o1 = self.asn1_type(value=i)
self.assertEqual(o1.value, value)
# And other initializers that yield the same value.
for j in initializer_set:
o2 = self.asn1_type(value=j)
self.assertEqual(o2.value, value)
self.assertEqual(o1, o2)
if self.immutable:
self.assertEqual(hash(o1), hash(o2))
elif self.repeated:
self.assertEqual(len(o1), len(o2))
for i in range(len(o1)):
self.assertEqual(o1[i], o2[i])
elif self.keyed:
self.assertEqual(len(o1), len(o2))
self.assertEqual(o1.keys(), o2.keys())
for key in o1:
self.assertEqual(o1[key], o2[key])
# Sanity-check: different initializers yield different values.
for i in range(len(self.initializers)):
for j in range(i+1, len(self.initializers)):
o1 = self.asn1_type(value=self.initializers[i][0])
o2 = self.asn1_type(value=self.initializers[j][0])
self.assertNotEqual(o1, o2)
if self.immutable:
self.assertNotEqual(hash(o1), hash(o2))
self.assertNotEqual(o1.value, o2.value)
def test_create_fails(self):
for init, err in self.bad_initializers:
self.assertRaises(err, self.asn1_type, init)
def test_encode_decode(self):
for value, enc in self.encode_test_vectors:
o1 = self.asn1_type(value=value)
o2 = self.asn1_type.decode(enc.decode("hex"))
self.assertEqual(o1, o2)
self.assertEqual(o1.value, o2.value)
self.assertEqual(enc, o1.encode().encode("hex"))
self.assertEqual(enc, o2.encode().encode("hex"))
def test_decode_fails(self):
for bad_enc in self.bad_encodings:
self.assertRaises(error.ASN1Error, self.asn1_type.decode,
bad_enc.decode("hex"))
self.assertRaises(error.ASN1Error, self.asn1_type.decode,
bad_enc.decode("hex"), strict=False)
def test_strict_decode_fails(self):
for value, bad_enc in self.bad_strict_encodings:
o = self.asn1_type(value=value)
self.assertRaises(error.ASN1Error,
self.asn1_type.decode, bad_enc.decode("hex"))
o2 = self.asn1_type.decode(bad_enc.decode("hex"), strict=False)
self.assertEqual(o, o2)
# The object should keep its original encoding...
self.assertEqual(bad_enc, o2.encode().encode("hex"))
# ... which is not the canonical encoding.
self.assertNotEqual(bad_enc, o.encode().encode("hex"))
| apache-2.0 | -6,243,667,938,546,458,000 | 44.16 | 78 | 0.593224 | false | 4.072137 | true | false | false |
prospwro/odoo | addons/irsid_edu_training/models/student_program.py | 2 | 13940 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution Addon
# Copyright (C) 2009-2013 IRSID (<http://irsid.ru>),
# Paul Korotkov ([email protected]).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from core import EDU_STATES
class edu_student_program(osv.Model):
_name = 'edu.student.program'
_description = 'Student Program'
_inherit = ['mail.thread']
def _get_state(self, cr, uid, ids, name, arg, context=None):
res = {}
for st_program in self.browse(cr, uid, ids, context):
res[st_program.id] = st_program.stage_id.state
return res
# Access Functions
def create(self, cr, uid, vals, context=None):
if vals.get('code','/')=='/':
vals['code'] = self.pool.get('ir.sequence').get(cr, uid, 'edu.student.program') or '/'
return super(edu_student_program, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
super(edu_student_program, self).write(cr, uid, ids, vals, context=context)
if isinstance(ids, (int, long)):
ids = [ids]
for st_program in self.browse(cr, uid, ids, context=context):
student_id = st_program.student_id.id
if student_id not in st_program.message_follower_ids:
self.message_subscribe(cr, uid, ids, [student_id], context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({
'code': self.pool.get('ir.sequence').get(cr, uid, 'edu.student.program'),
})
return super(edu_student_program, self).copy(cr, uid, id, default, context=context)
def unlink(self, cr, uid, ids, context=None):
context = context or {}
for record in self.browse(cr, uid, ids, context=context):
if record.state not in ['draft']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete document in state \'%s\'.') % record.state)
return super(edu_student_program, self).unlink(cr, uid, ids, context=context)
# Naming Functions
def _name_get_fnc(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for st_program in self.browse(cr, uid, ids, context=context):
result[st_program.id] = st_program.code + ': ' + st_program.student_id.name
return result
# Update Functions
def _update_list_by_student(self, cr, uid, ids, context=None):
return self.pool.get('edu.student.program').search(cr, uid, [('student_id', 'in', ids)], context=context)
def _update_list_by_stage(self, cr, uid, ids, context=None):
return self.pool.get('edu.student.program').search(cr, uid, [('stage_id', 'in', ids)], context=context)
# Onchange Functions
def onchange_program_id(self, cr, uid, ids, program_id, context=None):
if program_id:
program = self.pool.get('edu.program').browse(cr, uid, program_id, context=context)
return {'value': {
'speciality_id': program.speciality_id.id,
'mode_id': program.mode_id.id,
'stage_id': program.stage_ids[0].id or False,
'plan_id': False,
}}
return {'value': {}}
# Other Functions
def make_work_orders(self, cr, uid, ids, context=None):
work_order_obj = self.pool.get('edu.work.order')
work_obj = self.pool.get('edu.work')
module_work_obj = self.pool.get('edu.module.work')
line_obj = self.pool.get('edu.order.line')
year_id = self.pool.get('edu.year').search(cr, uid, [], limit=1, context=context)[0]
cr.execute("""
SELECT DISTINCT
program_id,
stage_id
FROM
edu_student_program
WHERE
id IN %s
""",(tuple(ids),))
params = cr.fetchall()
if params:
for param in params:
cr.execute("""
SELECT DISTINCT
module_id
FROM
edu_plan_module_rel
WHERE
plan_id IN (
SELECT DISTINCT
plan_id
FROM
edu_student_program
WHERE
id IN %s AND
program_id = %s AND
stage_id = %s
)
""",(tuple(ids), param[0], param[1],))
module_ids = [r[0] for r in cr.fetchall()]
module_work_ids = module_work_obj.search(cr, uid, [
('time_id.period_id.stage_id','=',param[1]),
('module_id','in', module_ids),
], context=context)
if module_work_ids:
work_order_ids = work_order_obj.search(cr, uid, [
('year_id','=',year_id),
('program_id','=',param[0]),
('stage_id','=',param[1]),
('state','=','draft'),
], context=context)
if len(work_order_ids):
work_order_id = work_order_ids[0]
else:
vals = work_order_obj.onchange_year_id(cr, uid, ids, year_id, context=context)['value']
vals['year_id'] = year_id
vals['program_id'] = param[0]
vals['stage_id'] = param[1]
vals['name'] = 'Об установлении учебной нагрузки'
work_order_id = work_order_obj.create(cr, uid, vals, context=context)
cr.execute("""
SELECT
time_id,
date_start,
date_stop
FROM
edu_schedule_line
WHERE
year_id = %s AND
program_id = %s AND
state = 'approved'
""",(year_id, param[0],))
schedule_line = dict(map(lambda x: (x[0], (x[1],x[2])), cr.fetchall()))
for module_work in module_work_obj.browse(cr, uid, module_work_ids, context = context):
cr.execute("""
SELECT
id
FROM
edu_student_program
WHERE
id IN %s AND
program_id = %s AND
stage_id = %s AND
plan_id IN %s
""",(tuple(ids), param[0], param[1], tuple(plan.id for plan in module_work.module_id.plan_ids)))
st_program_ids = [r[0] for r in cr.fetchall()]
work_ids = work_obj.search(cr, uid, [('modulework_id','=',module_work.id),('order_id','=',work_order_id)], context=context)
if len(work_ids):
dates = schedule_line.get(module_work.time_id.id,(False, False))
work_obj.write(cr, uid, work_ids, {
'date_start': dates[0],
'date_stop': dates[1],
'st_program_ids': [(6, 0, st_program_ids)]
}, context=context)
else:
vals = work_obj.onchange_modulework_id(cr, uid, ids, module_work.id, context=context)['value']
vals['order_id'] = work_order_id
vals['modulework_id'] = module_work.id
dates = schedule_line.get(module_work.time_id.id,(False, False))
vals['date_start'] = dates[0]
vals['date_stop'] = dates[1]
vals['st_program_ids'] = [(6, 0, st_program_ids)]
work_obj.create(cr, uid, vals, context = context)
return True
# Fields
_columns = {
'code': fields.char(
'Code',
size = 32,
required = True,
readonly = True,
states = {'draft': [('readonly',False)]},
),
'name': fields.function(
_name_get_fnc,
type='char',
string = 'Name',
store = {
'edu.student.program': (lambda self, cr, uid, ids, c={}: ids, ['code', 'student_id'], 10),
'res.partner': (_update_list_by_student, ['name'], 20),
},
readonly = True,
),
'student_id': fields.many2one(
'res.partner',
'Student',
domain="[('student','=',True)]",
required = True,
readonly = True,
states = {'draft': [('readonly',False)]},
track_visibility='onchange',
),
'program_id': fields.many2one(
'edu.program',
'Education Program',
required = True,
readonly = True,
states = {'draft': [('readonly',False)]},
track_visibility='onchange',
),
'speciality_id': fields.related(
'program_id',
'speciality_id',
type='many2one',
relation = 'edu.speciality',
string = 'Speciality',
store = True,
readonly = True,
),
'mode_id': fields.related(
'program_id',
'mode_id',
type='many2one',
relation = 'edu.mode',
string = 'Mode Of Study',
store = True,
readonly = True,
),
'group_id': fields.many2one(
'edu.group',
'Group',
track_visibility='onchange',
),
'plan_id': fields.many2one(
'edu.plan',
'Training Plan',
readonly = True,
states = {'draft': [('readonly',False)]},
track_visibility='onchange',
),
'stage_id': fields.many2one(
'edu.stage',
'Stage',
readonly = True,
required = True,
states = {'draft': [('readonly',False)]},
track_visibility='onchange',
),
'color': fields.integer(
'Color Index',
),
'status': fields.selection(
[
('student', 'Student'),
('listener', 'Listener'),
],
'Status',
required = True,
readonly = True,
states = {'draft': [('readonly',False)]},
track_visibility='onchange',
),
'grade_ids': fields.many2many(
'edu.grade',
'edu_student_program_grade_rel',
'st_program_id',
'grade_id',
'Grades',
readonly = True,
),
'work_ids': fields.many2many(
'edu.work',
'edu_work_st_program_rel',
'st_program_id',
'work_id',
'Training Work',
readonly = True,
),
'record_ids': fields.many2many(
'edu.record',
'edu_student_program_record_rel',
'st_program_id',
'record_id',
'Record',
readonly = True,
),
'image_medium': fields.related(
'student_id',
'image_medium',
type = 'binary',
string = 'Medium-sized image',
readonly = True,
),
'state': fields.function(
_get_state,
type = 'selection',
selection = EDU_STATES,
string = 'State',
store = {
'edu.student.program': (lambda self, cr, uid, ids, c={}: ids, ['stage_id'], 10),
'edu.stage': (_update_list_by_stage, ['state'], 20),
},
readonly = True,
),
}
# Default Values
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
stage_ids = self.pool.get('edu.stage').search(cr, uid, [('state','=','draft')], order='sequence', context=context)
if stage_ids:
return stage_ids[0]
return False
_defaults = {
'stage_id': _get_default_stage_id,
'state': 'draft',
'status': 'student',
'code': '/',
}
# SQL Constraints
_sql_constraints = [
('student_program_uniq', 'unique(student_id, program_id)', 'Program must be unique per Student!'),
('code_uniq', 'unique(code)', 'Code must be unique!')
]
# Sorting Order
_order = 'program_id,stage_id,group_id,student_id'
| agpl-3.0 | 1,469,602,133,691,813,600 | 39.556851 | 147 | 0.4651 | false | 4.22057 | false | false | false |
jhuapl-boss/intern | intern/service/dvid/volume.py | 1 | 6479 | # Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intern.resource.dvid import DataInstanceResource, RepositoryResource
from intern.service.dvid import DVIDService
from intern.utils.parallel import *
from requests import HTTPError
import requests
import numpy as np
import json
import blosc
def check_data_instance(fcn):
"""Decorator that ensures a valid data instance is passed in.
Args:
fcn (function): Function that has a DataInstanceResource as its second argument.
Returns:
(function): Wraps given function with one that checks for a valid data instance.
"""
def wrapper(*args, **kwargs):
if not isinstance(args[1], DataInstanceResource):
raise RuntimeError(
"resource must be an instance of intern.resource.intern.DataInstanceResource."
)
return fcn(*args, **kwargs)
return wrapper
class VolumeService(DVIDService):
"""VolumeService for DVID service.
"""
def __init__(self, base_url):
"""Constructor.
Args:
base_url (str): Base url (host) of project service.
Raises:
(KeyError): if given invalid version.
"""
DVIDService.__init__(self)
self.base_url = base_url
@check_data_instance
def get_cutout(self, resource, resolution, x_range, y_range, z_range, **kwargs):
"""Download a cutout from DVID data store.
Args:
resource (intern.resource.resource.Resource): Resource compatible
with cutout operations
resolution (int): 0 (not applicable on DVID Resource).
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
chunk_size (optional Tuple[int, int, int]): The chunk size to request
Returns:
(numpy.array): A 3D or 4D numpy matrix in ZXY(time) order.
Raises:
requests.HTTPError
"""
x_size = x_range[1] - x_range[0]
y_size = y_range[1] - y_range[0]
z_size = z_range[1] - z_range[0]
# Make the request
resp = requests.get(
"{}/api/node/{}/{}/raw/0_1_2/{}_{}_{}/{}_{}_{}/octet-stream".format(
self.base_url,
resource.UUID,
resource.name,
x_size,
y_size,
z_size,
x_range[0],
y_range[0],
z_range[0],
)
)
if resp.status_code != 200 or resp.status_code == 201:
msg = "Get cutout failed on {}, got HTTP response: ({}) - {}".format(
resource.name, resp.status_code, resp.text
)
raise HTTPError(msg, response=resp)
block = np.frombuffer(resp.content, dtype=resource.datatype)
cutout = block.reshape(z_size, y_size, x_size)
return cutout
@check_data_instance
def create_cutout(
self, resource, resolution, x_range, y_range, z_range, numpyVolume, send_opts
):
"""Upload a cutout to the volume service.
NOTE: This method will fail if no metadata has been added to the data instance.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
numpyVolume (numpy.array): A 3D or 4D (time) numpy matrix in (time)ZYX order.
send_opts (dictionary): Additional arguments to pass to session.send().
"""
# Check that the data array is C Contiguous
blktypes = ["uint8blk", "labelblk", "rgba8blk"]
if not numpyVolume.flags["C_CONTIGUOUS"]:
numpyVolume = np.ascontiguousarray(numpyVolume)
if resource._type == "tile":
# Compress the data
# NOTE: This is a convenient way for compressing/decompressing NumPy arrays, however
# this method uses pickle/unpickle which means we make additional copies that consume
# a bit of extra memory and time.
compressed = blosc.pack_array(numpyVolume)
url_req = "{}/api/node/{}/{}/tile/xy/{}/{}_{}_{}".format(
self.base_url,
resource.UUID,
resource.name,
resolution,
x_range[0],
y_range[0],
z_range[0],
)
out_data = compressed
# Make the request
elif resource._type in blktypes:
numpyVolume = numpyVolume.tobytes(order="C")
url_req = "{}/api/node/{}/{}/raw/0_1_2/{}_{}_{}/{}_{}_{}".format(
self.base_url,
resource.UUID,
resource.name,
x_range[1] - x_range[0],
y_range[1] - y_range[0],
z_range[1] - z_range[0],
x_range[0],
y_range[0],
z_range[0],
)
out_data = numpyVolume
else:
raise NotImplementedError(
"{} type is not yet implemented in create_cutout".format(resource._type)
)
resp = requests.post(url_req, data=out_data)
if resp.status_code != 200 or resp.status_code == 201:
msg = "Create cutout failed on {}, got HTTP response: ({}) - {}".format(
resource.name, resp.status_code, resp.text
)
raise HTTPError(msg, response=resp)
return
| apache-2.0 | 7,961,849,185,573,913,000 | 36.022857 | 97 | 0.572156 | false | 4.100633 | false | false | false |
conejoninja/pelisalacarta | python/version-command-line/download_url.py | 1 | 1720 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# Script for downloading files from any server supported on pelisalacarta
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#-------------------------------------------------------------------------
import re,urllib,urllib2,sys,os
sys.path.append ("lib")
from core import config
config.set_setting("debug","true")
from core import scrapertools
from core import downloadtools
from core.item import Item
from servers import servertools
def download_url(url,titulo,server):
url = url.replace("\\","")
print "Analizando enlace "+url
# Averigua el servidor
if server=="":
itemlist = servertools.find_video_items(data=url)
if len(itemlist)==0:
print "No se puede identificar el enlace"
return
item = itemlist[0]
print "Es un enlace en "+item.server
else:
item = Item()
item.server = server
# Obtiene las URL de descarga
video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(item.server,url)
if len(video_urls)==0:
print "No se ha encontrado nada para descargar"
return
# Descarga el de mejor calidad, como hace pelisalacarta
print "Descargando..."
print video_urls
devuelve = downloadtools.downloadbest(video_urls,titulo,continuar=True)
if __name__ == "__main__":
url = sys.argv[1]
title = sys.argv[2]
if len(sys.argv)>=4:
server = sys.argv[3]
else:
server = ""
if title.startswith("http://") or title.startswith("https://"):
url = sys.argv[2]
title = sys.argv[1]
download_url(url,title,server)
| gpl-3.0 | 2,455,178,989,207,028,000 | 26.741935 | 92 | 0.588372 | false | 3.659574 | false | false | false |
cjonsmith/python-challenge | problem_05.py | 1 | 1162 | """The first hint for this problem is the title of the webpage: 'peak hell'.
When pronounced, it sounds very similar to 'pickle', which is the builtin
python object serialization package. When viewing the the source code of the
webpage, there is a 'peakhell' tag that links to a pickle file. We'll download
the file (prompting the user if they are okay with deserializing the file) then
view its contents."""
import pickle
import requests
import webbrowser
from bs4 import BeautifulSoup
webpage = "http://www.pythonchallenge.com/pc/def/peak.html"
r = requests.get(webpage)
soup = BeautifulSoup(r.content, "html.parser")
peakhell = soup.find("peakhell")["src"]
split_page = webpage.split("peak.html")
pickle_file = f"{split_page[0]}{peakhell}"
r = requests.get(pickle_file)
with open(peakhell, "wb") as fp:
fp.write(r.content)
# Print out each line to the console.
msg = pickle.load(open(peakhell, "rb"))
line = ""
for lst in msg:
for tup in lst:
line += tup[0] * tup[1]
print(line)
line = ""
print("opening new webpage...")
split_page = webpage.split("peak.html")
new_page = f"{split_page[0]}channel.html"
webbrowser.open(new_page)
| mit | 5,570,560,142,735,886,000 | 30.405405 | 79 | 0.718589 | false | 3.218837 | false | false | false |
metagriffin/pysyncml | pysyncml/model/store.py | 1 | 17402 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: metagriffin <[email protected]>
# date: 2012/06/14
# copy: (C) Copyright 2012-EOT metagriffin -- see LICENSE.txt
#------------------------------------------------------------------------------
# This software is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#------------------------------------------------------------------------------
'''
The ``pysyncml.model.store`` provides a SyncML datastore abstraction
via the :class:`pysyncml.model.store.Store` class, which includes both
the datastore meta information and, if the datastore is local, an
agent to execute data interactions.
'''
import sys, json, logging
import xml.etree.ElementTree as ET
from sqlalchemy import Column, Integer, Boolean, String, Text, ForeignKey
from sqlalchemy.orm import relation, synonym, backref
from sqlalchemy.orm.exc import NoResultFound
from .. import common, constants, ctype
log = logging.getLogger(__name__)
#------------------------------------------------------------------------------
def decorateModel(model):
#----------------------------------------------------------------------------
class Store(model.DatabaseObject):
allSyncTypes = [
constants.SYNCTYPE_TWO_WAY,
constants.SYNCTYPE_SLOW_SYNC,
constants.SYNCTYPE_ONE_WAY_FROM_CLIENT,
constants.SYNCTYPE_REFRESH_FROM_CLIENT,
constants.SYNCTYPE_ONE_WAY_FROM_SERVER,
constants.SYNCTYPE_REFRESH_FROM_SERVER,
constants.SYNCTYPE_SERVER_ALERTED,
]
adapter_id = Column(Integer, ForeignKey('%s_adapter.id' % (model.prefix,),
onupdate='CASCADE', ondelete='CASCADE'),
nullable=False, index=True)
adapter = relation('Adapter', backref=backref('_stores', # order_by=id,
cascade='all, delete-orphan',
passive_deletes=True))
uri = Column(String(4095), nullable=False, index=True)
displayName = Column(String(4095))
_syncTypes = Column('syncTypes', String(4095)) # note: default set in __init__
maxGuidSize = Column(Integer) # note: default set in __init__
maxObjSize = Column(Integer) # note: default set in __init__
_conflictPolicy = Column('conflictPolicy', Integer)
agent = None
@property
def syncTypes(self):
return json.loads(self._syncTypes or 'null')
@syncTypes.setter
def syncTypes(self, types):
self._syncTypes = json.dumps(types)
@property
def contentTypes(self):
if self.agent is not None:
return self.agent.contentTypes
return self._contentTypes
@property
def conflictPolicy(self):
if self._conflictPolicy is not None:
return self._conflictPolicy
# todo: this assumes that this store is the local one...
return self.adapter.conflictPolicy
@conflictPolicy.setter
def conflictPolicy(self, policy):
self._conflictPolicy = policy
@property
def peer(self):
return self.getPeerStore()
#--------------------------------------------------------------------------
def getPeerStore(self, adapter=None):
if not self.adapter.isLocal:
if adapter is None:
# todo: implement this...
raise common.InternalError('local adapter is required for call to remoteStore.getPeerStore()')
uri = adapter.router.getSourceUri(self.uri, mustExist=False)
if uri is None:
return None
return adapter.stores[uri]
if self.adapter.peer is None:
return None
ruri = self.adapter.router.getTargetUri(self.uri, mustExist=False)
if ruri is None:
return None
return self.adapter.peer.stores[ruri]
#--------------------------------------------------------------------------
def __init__(self, **kw):
# TODO: this is a little hack... it is because the .merge() will
# otherwise override valid values with null values when the merged-in
# store has not been flushed, and because this is a valid value,
# open flush, is being nullified. ugh.
# NOTE: the default is set here, not in the Column() definition, so that
# NULL values remain NULL during a flush) - since they are valid.
self._syncTypes = kw.get('syncTypes', repr(Store.allSyncTypes))
self.maxGuidSize = kw.get('maxGuidSize', common.getAddressSize())
self.maxObjSize = kw.get('maxObjSize', common.getMaxMemorySize())
super(Store, self).__init__(**kw)
#----------------------------------------------------------------------------
def __repr__(self):
ret = '<Store "%s": uri=%s' % (self.displayName or self.uri, self.uri)
if self.maxGuidSize is not None:
ret += '; maxGuidSize=%d' % (self.maxGuidSize,)
if self.maxObjSize is not None:
ret += '; maxObjSize=%d' % (self.maxObjSize,)
if self.syncTypes is not None and len(self.syncTypes) > 0:
ret += '; syncTypes=%s' % (','.join([str(st) for st in self.syncTypes]),)
if self.contentTypes is not None and len(self.contentTypes) > 0:
ret += '; contentTypes=%s' % (','.join([str(ct) for ct in self.contentTypes]),)
return ret + '>'
#----------------------------------------------------------------------------
def merge(self, store):
if self.uri != store.uri:
raise common.InternalError('unexpected merging of stores with different URIs (%s != %s)'
% (self.uri, store.uri))
self.displayName = store.displayName
if cmp(self._contentTypes, store._contentTypes) != 0:
# todo: this is a bit drastic... perhaps have an operational setting
# which controls how paranoid to be?...
self.binding = None
self._contentTypes = [e.clone() for e in store._contentTypes]
self.syncTypes = store.syncTypes
self.maxGuidSize = store.maxGuidSize
self.maxObjSize = store.maxObjSize
self.agent = store.agent
return self
#----------------------------------------------------------------------------
def clearChanges(self):
if self.adapter.isLocal:
# TODO: THIS NEEDS TO BE SIGNIFICANTLY OPTIMIZED!... either:
# a) optimize this reverse lookup, or
# b) use a query that targets exactly the set of stores needed
# note that a pre-emptive model.session.flush() may be necessary.
for peer in self.adapter.getKnownPeers():
for store in peer._stores:
if store.binding is not None and store.binding.uri == self.uri:
store.clearChanges()
return
if self.id is None:
model.session.flush()
model.Change.q(store_id=self.id).delete()
#----------------------------------------------------------------------------
def registerChange(self, itemID, state, changeSpec=None, excludePeerID=None):
if self.adapter.isLocal:
# TODO: THIS NEEDS TO BE SIGNIFICANTLY OPTIMIZED!... either:
# a) optimize this reverse lookup, or
# b) use a query that targets exactly the set of stores needed
# note that a pre-emptive model.session.flush() may be necessary.
for peer in self.adapter.getKnownPeers():
if excludePeerID is not None and peer.id == excludePeerID:
continue
for store in peer._stores:
if store.binding is not None and store.binding.uri == self.uri:
store.registerChange(itemID, state, changeSpec=changeSpec)
return
if self.id is None:
model.session.flush()
itemID = str(itemID)
change = None
if changeSpec is not None:
try:
change = model.Change.q(store_id=self.id, itemID=itemID).one()
change.state = state
if change.changeSpec is not None:
change.changeSpec += ';' + changeSpec
if len(change.changeSpec) > model.Change.c.changeSpec.type.length:
change.changeSpec = None
except NoResultFound:
change = None
if change is None:
model.Change.q(store_id=self.id, itemID=itemID).delete()
change = model.Change(store_id=self.id, itemID=itemID,
state=state, changeSpec=changeSpec)
model.session.add(change)
#--------------------------------------------------------------------------
def getRegisteredChanges(self):
return model.Change.q(store_id=self.id)
#----------------------------------------------------------------------------
def describe(self, s1):
s2 = common.IndentStream(s1)
s3 = common.IndentStream(s2)
print >>s1, self.displayName or self.uri
print >>s2, 'URI:', self.uri
print >>s2, 'Sync types:', ','.join([str(e) for e in self.syncTypes or []])
print >>s2, 'Max ID size:', self.maxGuidSize or '(none)'
print >>s2, 'Max object size:', self.maxObjSize or '(none)'
print >>s2, 'Capabilities:'
for cti in self.contentTypes or []:
cti.describe(s3)
#----------------------------------------------------------------------------
def toSyncML(self):
xstore = ET.Element('DataStore')
if self.uri is not None:
ET.SubElement(xstore, 'SourceRef').text = self.uri
if self.displayName is not None:
ET.SubElement(xstore, 'DisplayName').text = self.displayName
if self.maxGuidSize is not None:
# todo: this should ONLY be sent by the client... (according to the
# spec, but not according to funambol behavior...)
ET.SubElement(xstore, 'MaxGUIDSize').text = str(self.maxGuidSize)
if self.maxObjSize is not None:
ET.SubElement(xstore, 'MaxObjSize').text = str(self.maxObjSize)
if self.contentTypes is not None:
rxpref = [ct for ct in self.contentTypes if ct.receive and ct.preferred]
if len(rxpref) > 1:
raise common.InvalidAgent('agents can prefer at most one rx content-type, not %r' % (rxpref,))
if len(rxpref) == 1:
for idx, xnode in enumerate(rxpref[0].toSyncML('Rx-Pref', uniqueVerCt=True)):
if idx != 0:
xnode.tag = 'Rx'
xstore.append(xnode)
for rx in [ct for ct in self.contentTypes if ct.receive and not ct.preferred]:
for xnode in rx.toSyncML('Rx', uniqueVerCt=True):
xstore.append(xnode)
txpref = [ct for ct in self.contentTypes if ct.transmit and ct.preferred]
if len(txpref) > 1:
raise common.InvalidAgent('agents can prefer at most one tx content-type, not %r' % (txpref,))
if len(txpref) == 1:
for idx, xnode in enumerate(txpref[0].toSyncML('Tx-Pref', uniqueVerCt=True)):
if idx != 0:
xnode.tag = 'Tx'
xstore.append(xnode)
for tx in [ct for ct in self.contentTypes if ct.transmit and not ct.preferred]:
for xnode in tx.toSyncML('Tx', uniqueVerCt=True):
xstore.append(xnode)
if self.syncTypes is not None and len(self.syncTypes) > 0:
xcap = ET.SubElement(xstore, 'SyncCap')
for st in self.syncTypes:
ET.SubElement(xcap, 'SyncType').text = str(st)
return xstore
#----------------------------------------------------------------------------
@staticmethod
def fromSyncML(xnode):
store = model.Store()
store.uri = xnode.findtext('SourceRef')
store.displayName = xnode.findtext('DisplayName')
store.maxGuidSize = xnode.findtext('MaxGUIDSize')
if store.maxGuidSize is not None:
store.maxGuidSize = int(store.maxGuidSize)
store.maxObjSize = xnode.findtext('MaxObjSize')
if store.maxObjSize is not None:
store.maxObjSize = int(store.maxObjSize)
store.syncTypes = [int(x.text) for x in xnode.findall('SyncCap/SyncType')]
store._contentTypes = []
for child in xnode:
if child.tag not in ('Tx-Pref', 'Tx', 'Rx-Pref', 'Rx'):
continue
cti = model.ContentTypeInfo.fromSyncML(child)
for curcti in store._contentTypes:
if curcti.merge(cti):
break
else:
store._contentTypes.append(cti)
return store
#----------------------------------------------------------------------------
class ContentTypeInfo(model.DatabaseObject, ctype.ContentTypeInfoMixIn):
store_id = Column(Integer, ForeignKey('%s_store.id' % (model.prefix,),
onupdate='CASCADE', ondelete='CASCADE'),
nullable=False, index=True)
store = relation('Store', backref=backref('_contentTypes', # order_by=id,
cascade='all, delete-orphan',
passive_deletes=True))
ctype = Column(String(4095))
_versions = Column('versions', String(4095))
preferred = Column(Boolean, default=False)
transmit = Column(Boolean, default=True)
receive = Column(Boolean, default=True)
@property
def versions(self):
return json.loads(self._versions or 'null')
@versions.setter
def versions(self, types):
self._versions = json.dumps(types)
def clone(self):
# TODO: this should be moved into `model.DatabaseObject`
# see:
# https://groups.google.com/forum/?fromgroups#!topic/sqlalchemy/bhYvmnRpegE
# http://www.joelanman.com/2008/09/making-a-copy-of-a-sqlalchemy-object/
return ContentTypeInfo(ctype=self.ctype, _versions=self._versions,
preferred=self.preferred, transmit=self.transmit, receive=self.receive)
def __str__(self):
return ctype.ContentTypeInfoMixIn.__str__(self)
def __repr__(self):
return ctype.ContentTypeInfoMixIn.__repr__(self)
def __cmp__(self, other):
for attr in ('ctype', 'versions', 'preferred', 'transmit', 'receive'):
ret = cmp(getattr(self, attr), getattr(other, attr))
if ret != 0:
return ret
return 0
#----------------------------------------------------------------------------
class Binding(model.DatabaseObject):
# todo: since store <=> binding is one-to-one, shouldn't this be a primary key?...
store_id = Column(Integer, ForeignKey('%s_store.id' % (model.prefix,),
onupdate='CASCADE', ondelete='CASCADE'),
nullable=False, index=True)
targetStore = relation('Store', backref=backref('binding', uselist=False,
cascade='all, delete-orphan',
passive_deletes=True))
# todo: this uri *could* be replaced by an actual reference to the Store object...
# and then the getSourceStore() method can go away...
# *BUT* this would require a one-to-many Adapter<=>Adapter relationship...
uri = Column(String(4095), nullable=True)
autoMapped = Column(Boolean)
sourceAnchor = Column(String(4095), nullable=True)
targetAnchor = Column(String(4095), nullable=True)
def getSourceStore(self, adapter):
return adapter.stores[self.uri]
#----------------------------------------------------------------------------
class Change(model.DatabaseObject):
store_id = Column(Integer, ForeignKey('%s_store.id' % (model.prefix,),
onupdate='CASCADE', ondelete='CASCADE'),
nullable=False, index=True)
# store = relation('Store', backref=backref('changes',
# cascade='all, delete-orphan',
# passive_deletes=True))
itemID = Column(String(4095), index=True, nullable=False)
state = Column(Integer)
registered = Column(Integer, default=common.ts)
changeSpec = Column(String(4095))
model.Store = Store
model.ContentTypeInfo = ContentTypeInfo
model.Binding = Binding
model.Change = Change
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
| gpl-3.0 | 4,451,211,935,199,341,600 | 45.90566 | 104 | 0.545914 | false | 4.324553 | false | false | false |
ashishbaghudana/mthesis-ashish | resources/tees/Utils/DetectHeads.py | 2 | 18739 | import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/..")
import Core.SentenceGraph as SentenceGraph
from Utils.ProgressCounter import ProgressCounter
from FindHeads import findHeads
import Utils.ElementTreeUtils as ETUtils
import Utils.InteractionXML.CorpusElements
import Utils.Range as Range
import Utils.Libraries.PorterStemmer as PorterStemmer
def getTriggers(corpus):
"""
Returns a dictionary of "entity type"->"entity text"->"count"
"""
corpus = ETUtils.ETFromObj(corpus)
trigDict = {}
for entity in corpus.getroot().getiterator("entity"):
if entity.get("given") == "True":
continue
eType = entity.get("type")
if not trigDict.has_key(eType):
trigDict[eType] = {}
eText = entity.get("text")
eText = PorterStemmer.stem(eText)
if not trigDict[eType].has_key(eText):
trigDict[eType][eText] = 0
trigDict[eType][eText] += 1
return trigDict
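# A minimal usage sketch of getTriggers, assuming a hypothetical interaction XML
# file "train.xml"; the entity types and trigger strings printed depend entirely
# on the corpus. The returned dictionary has the layout
# "entity type" -> "stemmed entity text" -> "count".
def _exampleGetTriggers(corpusPath="train.xml"):
    trigDict = getTriggers(corpusPath)
    for eType in sorted(trigDict.keys()):
        # show the five most frequent (stemmed) trigger strings per entity type
        byCount = sorted(trigDict[eType].items(), key=lambda x: -x[1])
        print eType, byCount[:5]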
def getDistribution(trigDict):
"""
Converts a dictionary of "entity type"->"entity text"->"count"
to "entity text"->"entity type"->"(count, fraction)"
"""
distDict = {}
eTypes = trigDict.keys()
for eType in trigDict.keys():
for string in trigDict[eType].keys():
if not distDict.has_key(string):
distDict[string] = {}
for e in eTypes:
distDict[string][e] = [0, None]
distDict[string][eType] = [trigDict[eType][string], None]
# define ratios
for string in distDict.keys():
count = 0.0
for eType in distDict[string].keys():
count += distDict[string][eType][0]
for eType in distDict[string].keys():
distDict[string][eType][1] = distDict[string][eType][0] / count
return distDict
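# A minimal sketch of the conversion done by getDistribution, using a small
# hand-made trigger dictionary (the entity types and the string are hypothetical):
# "bind" occurs three times as a Binding trigger and once as a Regulation trigger,
# so its per-type distribution becomes {"Binding": [3, 0.75], "Regulation": [1, 0.25]}.
def _exampleGetDistribution():
    distDict = getDistribution({"Binding": {"bind": 3}, "Regulation": {"bind": 1}})
    print distDict["bind"]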
def getHeads(corpus):
corpus = ETUtils.ETFromObj(corpus)
headDict = {}
headDict["None"] = {}
for sentence in corpus.getiterator("sentence"):
headOffsetStrings = set()
for entity in sentence.findall("entity"):
eType = entity.get("type")
if not headDict.has_key(eType):
headDict[eType] = {}
eText = entity.get("text")
headOffset = entity.get("headOffset")
headOffsetStrings.add(headOffset)
headOffset = Range.charOffsetToSingleTuple(headOffset)
charOffset = Range.charOffsetToSingleTuple(entity.get("charOffset"))
if headOffset == charOffset:
if not headDict[eType].has_key(eText): headDict[eType][eText] = 0
headDict[eType][eText] += 1
else:
                headText = eText[headOffset[0]-charOffset[0]:headOffset[1]-charOffset[0]+1] # slice the head string from the entity text (offsets are relative to the entity start)
if not headDict[eType].has_key(headText): headDict[eType][headText] = 0
headDict[eType][headText] += 1
        for token in sentence.getiterator("token"): # all tokens of the sentence, from any available tokenization
if not token.get("charOffset") in headOffsetStrings: # token is not the head of any entity
headText = token.get("text")
if not headDict["None"].has_key(headText): headDict["None"][headText] = 0
headDict["None"][headText] += 1
return headDict
def getOverlap():
pass
def removeHeads(corpus):
print >> sys.stderr, "Removing existing head offsets"
removeCount = 0
xml = ETUtils.ETFromObj(corpus)
for d in xml.getroot().findall("document"):
for s in d.findall("sentence"):
for e in s.findall("entity"):
if e.get("headOffset") != None:
removeCount += 1
del e.attrib["headOffset"]
print >> sys.stderr, "Removed head offsets from", removeCount, "entities"
return [0, removeCount]
def findHeads(corpus, stringsFrom, methods, parse, tokenization):
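    """
    Assign head offsets to the entities of an interaction XML corpus by running the
    requested passes in order.
    @param corpus: the corpus to process (anything accepted by ETUtils.ETFromObj);
                   entity elements get a headOffset attribute
    @param stringsFrom: a corpus from which known trigger strings are collected (used by the DICT pass)
    @param methods: an ordered list of passes, each one of "REMOVE", "SYNTAX" or "DICT"
    @param parse: name of the parse to use
    @param tokenization: name of the tokenization to use
    """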
for m in methods:
assert m in ["REMOVE", "SYNTAX", "DICT"]
corpus = ETUtils.ETFromObj(corpus)
counts = {}
for method in methods:
print >> sys.stderr, method, "pass"
if method == "REMOVE":
counts[method] = removeHeads(corpus)
elif method == "DICT":
counts[method] = findHeadsDictionary(corpus, stringsFrom, parse, tokenization)
elif method == "SYNTAX":
counts[method] = findHeadsSyntactic(corpus, parse, tokenization)
print >> sys.stderr, method, "pass added", counts[method][0], "and removed", counts[method][1], "heads"
print >> sys.stderr, "Summary (pass/added/removed):"
for method in methods:
print >> sys.stderr, " ", method, "/", counts[method][0], "/", counts[method][1]
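# A minimal usage sketch of the driver, assuming hypothetical file names and
# analysis names: remove any existing head offsets, then fill them in first from
# the dictionary of trigger strings seen in "train.xml" and finally from the
# dependency parses.
#
#   findHeads("devel.xml", "train.xml", ["REMOVE", "DICT", "SYNTAX"],
#             parse="McCC", tokenization="McCC")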
def mapSplits(splits, string, stringOffset):
"""
Maps substrings to a string, and stems them
"""
begin = 0
tuples = []
for split in splits:
offset = string.find(split, begin)
assert offset != -1
tuples.append( (split, PorterStemmer.stem(split), (offset,len(split))) )
begin = offset + len(split)
return tuples
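# A minimal sketch of mapSplits, assuming the hypothetical entity text "protein kinase C":
# each whitespace-separated part is mapped back to its (offset, length) within the
# entity text and stemmed, e.g. roughly
# [('protein', 'protein', (0, 7)), ('kinase', 'kinas', (8, 6)), ('C', 'C', (15, 1))]
# (the exact stems depend on the Porter stemmer implementation).
def _exampleMapSplits():
    eText = "protein kinase C"
    print mapSplits(eText.split(), eText, (0, len(eText)))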
def findHeadsDictionary(corpus, stringsFrom, parse, tokenization):
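    """
    Set head offsets for entities whose text has multiple (whitespace- or hyphen-separated)
    parts by comparing each part against the distribution of known trigger strings collected
    from 'stringsFrom'. Single-word entities are left for the SYNTAX pass. Returns a
    [#heads added, #heads removed] pair.
    """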
print "Extracting triggers from", stringsFrom
trigDict = getTriggers(stringsFrom)
print "Determining trigger distribution"
distDict = getDistribution(trigDict)
allStrings = sorted(distDict.keys())
print "Determining heads for", corpus
corpusElements = Utils.InteractionXML.CorpusElements.loadCorpus(corpus, parse, tokenization, removeIntersentenceInteractions=False, removeNameInfo=False)
cases = {}
counts = [0,0]
for sentence in corpusElements.sentences:
#print sentence.sentence.get("id")
sText = sentence.sentence.get("text")
#tokenHeadScores = None
for entity in sentence.entities:
if entity.get("headOffset") != None:
continue
if entity.get("given") == "True": # Only for triggers
continue
#if tokenHeadScores == None:
# tokenHeadScores = getTokenHeadScores(sentence.tokens, sentence.dependencies, sentenceId=sentence.sentence.get("id"))
eText = entity.get("text")
eType = entity.get("type")
eOffset = Range.charOffsetToSingleTuple(entity.get("charOffset"))
wsSplits = eText.split() # Split by whitespace
if len(wsSplits) == 1 and eText.find("-") == -1: # unambiguous head will be assigned by SYNTAX pass
continue
else: # Entity text has multiple (whitespace or hyphen separated) parts
candidates = []
# Try to find entity substring in individual entity strings
for wsTuple in mapSplits(wsSplits, eText, eOffset):
if not distDict.has_key(wsTuple[1]): # string not found, low score
candidates.append( ((-1, -1), wsTuple[2], wsTuple[0], wsTuple[1]) )
else: # String found, more common ones get higher score
                        assert distDict[wsTuple[1]].has_key(eType), (distDict[wsTuple[1]], wsTuple[1], eText)
candidates.append( (tuple(distDict[wsTuple[1]][eType]), wsTuple[2], wsTuple[0], wsTuple[1]) )
# Split each whitespace-separated string further into hyphen-separated substrings
for candidate in candidates[:]:
hyphenSplits = candidate[2].split("-")
if len(hyphenSplits) > 1: # Substring has a hyphen
# Try to find entity substring in individual entity strings
for hyphenTuple in mapSplits(hyphenSplits, eText, candidate[1]):
if not distDict.has_key(hyphenTuple[1]):
candidates.append( ((-1, -1), hyphenTuple[2], hyphenTuple[0], hyphenTuple[1]) )
else:
candidates.append( (tuple(distDict[hyphenTuple[1]][eType]), hyphenTuple[2], hyphenTuple[0], hyphenTuple[1]) )
# Sort candidates, highes scores come first
candidates.sort(reverse=True)
# If not matches, look for substrings inside words
if candidates[0][0][0] in [-1, 0]: # no matches, look for substrings
print "Substring matching", candidates, "for entity", entity.get("id")
for i in range(len(candidates)):
candidate = candidates[i]
cText = candidate[2]
for string in allStrings:
subStringPos = cText.find(string)
if subStringPos != -1:
print " Substring match", string, cText,
score = tuple(distDict[string][eType])
if score > candidate[0]:
print score, candidate[0], "Substring selected" #, score > candidate[0], score < candidate[0]
subStringCoords = [candidate[1][0] + subStringPos, len(string)]
candidate = (score, subStringCoords, candidate[2], ">"+string+"<")
else:
print score, candidate[0]
candidates[i] = candidate
# Resort after possibly replacing some candidates
candidates.sort(reverse=True)
if candidates[0][0][0] not in [-1, 0]: # if it is in [-1, 0], let SYNTAX pass take care of it
candidateOffset = (candidates[0][1][0] + eOffset[0], candidates[0][1][0] + candidates[0][1][1] + eOffset[0])
entity.set("headOffset", str(candidateOffset[0]) + "-" + str(candidateOffset[1]-1))
entity.set("headMethod", "Dict")
entity.set("headString", sText[candidateOffset[0]:candidateOffset[1]])
counts[0] += 1
# Prepare results for printing
for i in range(len(candidates)):
c = candidates[i]
candidates[i] = (tuple(c[0]), c[2], c[3])
case = (eType, eText, tuple(candidates))
if not cases.has_key(case):
cases[case] = 0
cases[case] += 1
print entity.get("id"), eType + ": '" + eText + "'", candidates
#headToken = getEntityHeadToken(entity, sentence.tokens, tokenHeadScores)
# The ElementTree entity-element is modified by setting the headOffset attribute
#entity.set("headOffset", headToken.get("charOffset"))
#entity.set("headMethod", "Syntax")
print "Cases"
for case in sorted(cases.keys()):
print case, cases[case]
#return corpus
return counts
def findHeadsSyntactic(corpus, parse, tokenization):
"""
Determine the head token for a named entity or trigger. The head token is the token closest
to the root for the subtree of the dependency parse spanned by the text of the element.
@param entityElement: a semantic node (trigger or named entity)
@type entityElement: cElementTree.Element
@param verbose: Print selected head tokens on screen
@param verbose: boolean
"""
counts = [0,0]
sentences = [x for x in corpus.getiterator("sentence")]
counter = ProgressCounter(len(sentences), "SYNTAX")
for sentence in sentences:
counter.update()
tokElement = ETUtils.getElementByAttrib(sentence, "sentenceanalyses/tokenizations/tokenization", {"tokenizer":tokenization})
parseElement = ETUtils.getElementByAttrib(sentence, "sentenceanalyses/parses/parse", {"parser":parse})
        if tokElement == None or parseElement == None:
            print >> sys.stderr, "Warning, sentence", sentence.get("id"), "missing parse or tokenization"
            continue
tokens = tokElement.findall("token")
tokenHeadScores = getTokenHeadScores(tokens, parseElement.findall("dependency"), sentenceId=sentence.get("id"))
for entity in sentence.findall("entity"):
if entity.get("headOffset") == None:
headToken = getEntityHeadToken(entity, tokens, tokenHeadScores)
# The ElementTree entity-element is modified by setting the headOffset attribute
entity.set("headOffset", headToken.get("charOffset"))
entity.set("headMethod", "Syntax")
entity.set("headString", headToken.get("text"))
counts[0] += 1
return counts
def getEntityHeadToken(entity, tokens, tokenHeadScores):
if entity.get("headOffset") != None:
charOffsets = Range.charOffsetToTuples(entity.get("headOffset"))
elif entity.get("charOffset") != "":
charOffsets = Range.charOffsetToTuples(entity.get("charOffset"))
else:
charOffsets = []
# Each entity can consist of multiple syntactic tokens, covered by its
# charOffset-range. One of these must be chosen as the head token.
headTokens = [] # potential head tokens
for token in tokens:
tokenOffset = Range.charOffsetToSingleTuple(token.get("charOffset"))
for offset in charOffsets:
if Range.overlap(offset, tokenOffset):
headTokens.append(token)
if len(headTokens)==1: # An unambiguous head token was found
selectedHeadToken = headTokens[0]
else: # One head token must be chosen from the candidates
selectedHeadToken = findHeadToken(headTokens, tokenHeadScores)
#if verbose:
# print >> sys.stderr, "Selected head:", token.attrib["id"], token.attrib["text"]
    assert selectedHeadToken != None, entity.get("id")
return selectedHeadToken
def findHeadToken(candidateTokens, tokenHeadScores):
"""
    Select the candidate token that is closest to the root of the subtree of the dependency parse
    to which the candidate tokens belong. See getTokenHeadScores method for the algorithm.
@param candidateTokens: the list of syntactic tokens from which the head token is selected
@type candidateTokens: list of cElementTree.Element objects
"""
if len(candidateTokens) == 0:
return None
highestScore = -9999999
bestTokens = []
for token in candidateTokens:
if tokenHeadScores[token] > highestScore:
highestScore = tokenHeadScores[token]
for token in candidateTokens:
if tokenHeadScores[token] == highestScore:
bestTokens.append(token)
return bestTokens[-1]
def getTokenHeadScores(tokens, dependencies, sentenceId=None):
"""
A head token is chosen using a heuristic that prefers tokens closer to the
root of the dependency parse. In a list of candidate tokens, the one with
the highest score is the head token. The return value of this method
is a dictionary that maps token elements to their scores.
"""
tokenHeadScores = {}
# Give all tokens initial scores
for token in tokens:
tokenHeadScores[token] = 0 # initialize score as zero (unconnected token)
for dependency in dependencies:
if dependency.get("t1") == token.get("id") or dependency.get("t2") == token.get("id"):
tokenHeadScores[token] = 1 # token is connected by a dependency
break
# Give a low score for tokens that clearly can't be head and are probably produced by hyphen-splitter
for token in tokens:
tokenText = token.get("text")
if tokenText == "\\" or tokenText == "/" or tokenText == "-":
tokenHeadScores[token] = -1
# Loop over all dependencies and increase the scores of all governor tokens
# until each governor token has a higher score than its dependent token.
# Some dependencies might form a loop so a list is used to define those
# dependency types used in determining head scores.
depTypesToInclude = ["prep", "nn", "det", "hyphen", "num", "amod", "nmod", "appos", "measure", "dep", "partmod"]
#depTypesToRemoveReverse = ["A/AN"]
modifiedScores = True
loopCount = 0 # loopcount for devel set approx. 2-4
while modifiedScores == True: # loop until the scores no longer change
if loopCount > 20: # survive loops
print >> sys.stderr, "Warning, possible loop in parse for sentence", sentenceId
break
modifiedScores = False
for token1 in tokens:
for token2 in tokens: # for each combination of tokens...
for dep in dependencies: # ... check each dependency
if dep.get("t1") == token1.get("id") and dep.get("t2") == token2.get("id") and (dep.get("type") in depTypesToInclude):
# The governor token of the dependency must have a higher score
# than the dependent token.
if tokenHeadScores[token1] <= tokenHeadScores[token2]:
tokenHeadScores[token1] = tokenHeadScores[token2] + 1
modifiedScores = True
loopCount += 1
return tokenHeadScores
if __name__=="__main__":
import sys
print >> sys.stderr, "##### Calculating entity head token offsets #####"
from optparse import OptionParser
# Import Psyco if available
try:
import psyco
psyco.full()
print >> sys.stderr, "Found Psyco, using"
except ImportError:
print >> sys.stderr, "Psyco not installed"
optparser = OptionParser(usage="%prog [options]\nRecalculate head token offsets.")
optparser.add_option("-i", "--input", default=None, dest="input", help="Corpus in interaction xml format", metavar="FILE")
optparser.add_option("-o", "--output", default=None, dest="output", help="Output file in interaction xml format.")
optparser.add_option("-d", "--dictionary", default=None, dest="dictionary", help="Corpus file to use as dictionary of entity strings.")
optparser.add_option("-m", "--methods", default=None, dest="methods", help="")
optparser.add_option("-p", "--parse", default="split-McClosky", dest="parse", help="Parse element name for calculating head offsets")
optparser.add_option("-t", "--tokenization", default="split-McClosky", dest="tokenization", help="Tokenization element name for calculating head offsets")
(options, args) = optparser.parse_args()
print >> sys.stderr, "Loading corpus"
corpus = ETUtils.ETFromObj(options.input)
print >> sys.stderr, "Finding heads"
findHeads(corpus, options.dictionary, ["REMOVE", "DICT", "SYNTAX"], options.parse, options.tokenization)
#findHeadsDictionary(corpus, options.parse, options.tokenization)
if options.output != None:
print >> sys.stderr, "Writing corpus"
ETUtils.write(corpus, options.output) | mit | 1,266,903,294,218,379,500 | 48.708223 | 158 | 0.613853 | false | 4.167 | false | false | false |
NickShaffner/rhea | test/test_cores/test_video/test_hdmi.py | 2 | 1811 |
from __future__ import print_function, division
import pytest
import myhdl
from myhdl import always, delay, instance, now, StopSimulation
import rhea
from rhea.system import Global
from rhea.cores.video import VideoStream, HDMIExtInterface
from rhea.cores.video import hdmi_xcvr
# a video display model to check the timings
from rhea.models.video import VideoDisplay
from rhea.utils.test import run_testbench
# @todo move cosimulation to cosimulation directory
# from _hdmi_prep_cosim import prep_cosim
# from interfaces import HDMI
def test_hdmi():
""" simple test to demonstrate test framework
"""
@myhdl.block
def bench_hdmi():
glbl = Global()
clock, reset = glbl.clock, glbl.reset
vid = VideoStream()
ext = HDMIExtInterface()
tbdut = hdmi_xcvr(glbl, vid, ext)
# clock for the design
@always(delay(5))
def tbclk():
clock.next = not clock
@instance
def tbstim():
yield delay(13)
reset.next = reset.active
yield delay(33)
reset.next = not reset.active
yield clock.posedge
try:
for ii in range(100):
yield delay(100)
except AssertionError as err:
print("@E: assertion error @ %d ns" % (now(),))
print(" %s" % (str(err),))
# additional simulation cycles after the error
yield delay(111)
raise err
except Exception as err:
print("@E: error occurred")
print(" %s" % (str(err),))
raise err
raise StopSimulation
return tbclk, tbstim
# run the above test
run_testbench(bench_hdmi)
| mit | -6,748,531,368,725,565,000 | 24.871429 | 63 | 0.573716 | false | 4.088036 | true | false | false |
mjrulesamrat/merchant | billing/integrations/authorize_net_dpm_integration.py | 4 | 5426 | from billing import Integration, IntegrationNotConfigured
from billing.forms.authorize_net_forms import AuthorizeNetDPMForm
from billing.signals import transaction_was_successful, transaction_was_unsuccessful
from django.conf import settings
from django.conf.urls import patterns, url
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.utils.decorators import method_decorator
from django.http import HttpResponseForbidden
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
import hashlib
import hmac
import urllib
csrf_exempt_m = method_decorator(csrf_exempt)
require_POST_m = method_decorator(require_POST)
class AuthorizeNetDpmIntegration(Integration):
display_name = "Authorize.Net Direct Post Method"
template = "billing/authorize_net_dpm.html"
def __init__(self):
super(AuthorizeNetDpmIntegration, self).__init__()
merchant_settings = getattr(settings, "MERCHANT_SETTINGS")
if not merchant_settings or not merchant_settings.get("authorize_net"):
raise IntegrationNotConfigured("The '%s' integration is not correctly "
"configured." % self.display_name)
self.authorize_net_settings = merchant_settings["authorize_net"]
def form_class(self):
return AuthorizeNetDPMForm
def generate_form(self):
transaction_key = self.authorize_net_settings["TRANSACTION_KEY"]
login_id = self.authorize_net_settings["LOGIN_ID"]
initial_data = self.fields
x_fp_hash = hmac.new(transaction_key, "%s^%s^%s^%s^" % (login_id,
initial_data['x_fp_sequence'],
initial_data['x_fp_timestamp'],
initial_data['x_amount']),
hashlib.md5)
initial_data.update({'x_login': login_id,
'x_fp_hash': x_fp_hash.hexdigest()})
form = self.form_class()(initial=initial_data)
return form
@property
def service_url(self):
if self.test_mode:
return "https://test.authorize.net/gateway/transact.dll"
return "https://secure.authorize.net/gateway/transact.dll"
def verify_response(self, request):
data = request.POST.copy()
md5_hash = self.authorize_net_settings["MD5_HASH"]
login_id = self.authorize_net_settings["LOGIN_ID"]
hash_str = "%s%s%s%s" % (md5_hash, login_id,
data.get("x_trans_id", ""),
data.get("x_amount", ""))
return hashlib.md5(hash_str).hexdigest() == data.get("x_MD5_Hash").lower()
@csrf_exempt_m
@require_POST_m
def authorizenet_notify_handler(self, request):
response_from_authorize_net = self.verify_response(request)
if not response_from_authorize_net:
return HttpResponseForbidden()
post_data = request.POST.copy()
result = post_data["x_response_reason_text"]
if request.POST['x_response_code'] == '1':
transaction_was_successful.send(sender=self,
type="sale",
response=post_data)
redirect_url = "%s?%s" % (request.build_absolute_uri(reverse("authorize_net_success_handler")),
urllib.urlencode({"response": result,
"transaction_id": request.POST["x_trans_id"]}))
return render_to_response("billing/authorize_net_relay_snippet.html",
{"redirect_url": redirect_url})
redirect_url = "%s?%s" % (request.build_absolute_uri(reverse("authorize_net_failure_handler")),
urllib.urlencode({"response": result}))
transaction_was_unsuccessful.send(sender=self,
type="sale",
response=post_data)
return render_to_response("billing/authorize_net_relay_snippet.html",
{"redirect_url": redirect_url})
def authorize_net_success_handler(self, request):
response = request.GET
return render_to_response("billing/authorize_net_success.html",
{"response": response},
context_instance=RequestContext(request))
def authorize_net_failure_handler(self, request):
response = request.GET
return render_to_response("billing/authorize_net_failure.html",
{"response": response},
context_instance=RequestContext(request))
def get_urls(self):
urlpatterns = patterns('',
url('^authorize_net-notify-handler/$', self.authorizenet_notify_handler, name="authorize_net_notify_handler"),
url('^authorize_net-sucess-handler/$', self.authorize_net_success_handler, name="authorize_net_success_handler"),
url('^authorize_net-failure-handler/$', self.authorize_net_failure_handler, name="authorize_net_failure_handler"),)
return urlpatterns
| bsd-3-clause | 8,007,233,141,908,801,000 | 49.240741 | 126 | 0.589016 | false | 4.465844 | false | false | false |
marcinkaszynski/burner-files | 2018-burning-man/sign2/effects.py | 1 | 6966 | import random
from math import cos, pi
from PIL import Image, ImageFilter
COLS = 20
ROWS = 12
def squash(v, low, high):
return min(max(low, v), high)
class FlatImage:
def __init__(self, file_name):
self.image = Image.open(file_name)
self.arr = self.image.load()
def tick(self, ts):
pass
def get_pixels(self):
return [self.arr[c, r][:3]
for r in range(0, ROWS)
for c in range(0, COLS)]
def randfloat(lo, hi):
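    # Note: as written this returns a value in [lo, lo + hi], i.e. hi acts as a
    # span added on top of lo rather than as an upper bound.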
return lo + random.random() * hi
def sprite_off(sprite, target_img):
pass
def sprite_move(sprite, target_img):
target_img.paste(sprite,
(random.choice([-1, 1]), random.choice([-1, 1])),
mask=sprite)
def sprite_smooth(sprite, target_img):
blurred = sprite.copy()
blurred.paste(sprite, (0, -1), sprite)
blurred.paste(sprite, (0, 1), sprite)
blurred = Image.blend(blurred, sprite, 0.3)
blurred.paste(sprite, (0, 0), mask=sprite)
target_img.paste(blurred, None, mask=blurred)
def normal(sprite, target_img):
target_img.paste(sprite, None, mask=sprite)
class SpriteJitter:
EMPTY_COLOR = (0, 0, 0)
OPS = [sprite_smooth, sprite_off, sprite_move]
EFFECT_DURATION = [0.2, 1.5]
BREAK_DURATION = [0.5, 1]
SPRITE_IDX_DURATION = [3, 10]
def __init__(self, file_name):
self.image = Image.open(file_name)
self.sprites = self.find_sprites()
self.start_ts = None
self.effect_end = None
self.effect = None
def tick(self, ts):
if self.start_ts is None:
self.start_ts = ts
self.effect_end = ts
self.effect = '--start--'
self.sprite_idx_end = ts
if ts >= self.sprite_idx_end:
self.sprite_idx_end = ts + randfloat(*self.SPRITE_IDX_DURATION)
self.sprite_idx = random.randrange(0, len(self.sprites))
elif ts < self.effect_end:
return
if self.effect == normal:
self.effect = random.choice(self.OPS)
self.effect_end = ts + randfloat(*self.EFFECT_DURATION)
else:
self.effect = normal
self.effect_end = ts + randfloat(*self.BREAK_DURATION)
img = Image.new('RGBA', self.image.size, (0, 0, 0, 0))
for idx, s in enumerate(self.sprites):
if idx != self.sprite_idx:
img.paste(s, (0, 0), mask=s)
if self.sprite_idx is not None:
self.effect(self.sprites[self.sprite_idx], img)
self.arr = img.load()
def source_p(self, c, r):
if (c < 0) or (c >= self.image.width) or (r < 0) or (r >= self.image.height):
return (0, 0, 0, 0)
return self.image.load()[c, r]
def get_pixels(self):
return [self.arr[c, r][:3]
for r in range(0, ROWS)
for c in range(0, COLS)]
def find_sprites(self):
seen_img = Image.new('1', self.image.size, 0)
sprites = []
def seen(x, y): return seen_img.load()[x, y]
def mark_seen(x, y): seen_img.putpixel((x, y), 1)
def flood_copy(sprite_color, spr, x, y):
if seen(x, y):
return
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
if self.source_p(x + dx, y + dy) == sprite_color:
mark_seen(x, y)
spr.putpixel((x, y), sprite_color)
if (dx != 0) or (dy != 0):
flood_copy(sprite_color, spr, x+dx, y+dy)
def get_sprite(sx, sy):
spr = Image.new('RGBA', self.image.size, (0, 0, 0, 0))
flood_copy(self.source_p(sx, sy), spr, sx, sy)
return spr
for x in range(0, self.image.width):
for y in range(0, self.image.height):
if seen(x, y):
continue
if self.source_p(x, y)[:3] == (0, 0, 0):
continue
sprites.append(get_sprite(x, y))
return sprites
class GreenT:
LOOP_LEN = 10
def __init__(self, file_name):
self.image = Image.open(file_name)
(self.image_width, self.image_height) = self.image.size
self.arr = self.image.load()
self.start_ts = None
def tick(self, ts):
if self.start_ts is None:
self.start_ts = ts
self.dy = squash(
14*(0.5-cos((ts - self.start_ts) * 2 * pi / self.LOOP_LEN)),
0, 6,
)
def p(self, c, r):
return self.arr[
squash(c, 0, self.image_width-1),
squash(r, 0, self.image_height-1),
]
def get_pixels(self):
return [self.p(c, r + self.dy)[:3]
for r in range(0, ROWS)
for c in range(0, COLS)]
class MultiEffect:
TRANSITION_TIME = 0.1
def __init__(self, effects, duration):
self.effects = effects
self.duration = duration
self.effect_idx = 0
self.state = 'effect'
self.end_ts = None
self.offset = None
def tick(self, ts):
if self.end_ts is None:
self.end_ts = ts + self.duration
if self.state == 'transition':
self.offset = int(squash((self.TRANSITION_TIME - (self.end_ts - ts)) / self.TRANSITION_TIME * COLS, 0, COLS-1))
self.effects[self.effect_idx].tick(ts)
self.effects[(self.effect_idx + 1) % len(self.effects)].tick(ts)
self.pixels = [None] * ROWS * COLS
curr_pixels = self.effects[self.effect_idx].get_pixels()
next_pixels = self.effects[(self.effect_idx + 1) % len(self.effects)].get_pixels()
for c in range(0, COLS):
for r in range(0, ROWS):
pi = r * COLS + c
if c + self.offset < COLS:
self.pixels[pi] = curr_pixels[pi + self.offset]
else:
self.pixels[pi] = next_pixels[pi + self.offset - COLS]
if ts >= self.end_ts:
self.effect_idx = (self.effect_idx + 1) % len(self.effects)
self.end_ts = ts + self.duration
self.state = 'effect'
else:
self.effects[self.effect_idx].tick(ts)
self.pixels = self.effects[self.effect_idx].get_pixels()
if ts >= self.end_ts:
self.end_ts = ts + self.TRANSITION_TIME
self.state = 'transition'
def get_pixels(self):
return self.pixels
class Bunny:
# In memory of Bull Bunny aka Michael R Oddo
FILE_NAME = 'img/bunny4.png'
def __init__(self):
self.image = Image.open(self.FILE_NAME)
self.arr = self.image.load()
def tick(self, ts):
pass
def get_pixels(self):
return [self.arr[c, r][:3]
for r in range(0, ROWS)
for c in range(0, COLS)]
| mit | -1,440,909,792,892,522,200 | 28.896996 | 123 | 0.515504 | false | 3.337805 | false | false | false |
pthatcher/psync | src/fs/FileSystem.py | 1 | 12463 | # Copyright (c) 2011, Peter Thatcher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The purpose of this file is to abstactly access the FileSystem,
# especially for the purpose of scanning it to see what files are
# different. It works really hard to do so fast.
import hashlib
import logging
import os
import platform
import shutil
import sys
from util import Record
DELETED_SIZE = 0
DELETED_MTIME = 0
class RootedPath(Record("root", "rel")):
""" Represents a path (rel) that is relative to another path
(root). For examples, when scanning a large directory, it is
convenient to know the paths relative to the directory passed in.
In code a RootedPath is often called an "rpath"."""
@property
def full(self):
return join_paths(*self)
# An "rpath" is short for "RootedPath"
class FileStat(Record("rpath", "size", "mtime")):
@classmethod
def from_deleted(cls, rpath):
return cls.new(rpath, DELETED_SIZE, DELETED_MTIME)
@property
def deleted(entry):
return entry.mtime == DELETED_MTIME
STAT_SIZE_INDEX = 6
STAT_MTIME_INDEX = 8
# All paths are unicode separated by "/". We encode for a given
# platform (Windows) as necessary.
PATH_SEP = "/"
def join_paths(*paths):
return PATH_SEP.join(paths)
def parent_path(path):
try:
parent, child = path.rsplit(PATH_SEP, 1)
except:
parent, child = "", path
return parent
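# For example: join_paths("a", "b", "c") -> "a/b/c", parent_path("a/b/c") -> "a/b",
# and parent_path("c") -> "" (a bare name has no parent component).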
# Windows shaves off a bit of mtime info.
# TODO: Only do this sillyness on Windows.
def mtimes_eq(mtime1, mtime2):
return (mtime1 >> 1) == (mtime2 >> 1)
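# For example, mtimes_eq(1300000000, 1300000001) is True (the timestamps differ
# only in the lowest bit), while mtimes_eq(1300000000, 1300000002) is False.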
# Path encoding is needed because Windows has really funky rules for
# dealing with unicode paths. It seems like an all OSes, what you get
# back and what it expects from you isn't consistent. The PathEncoder
# stuff is there to be a single place where we can take care of this.
# Also, we want to deal with paths in a consistent way with "/" and
# not worry about Windows oddities ("\", etc).
def PathEncoder():
is_mac = platform.os.name == "posix" and platform.system() == "Darwin"
is_windows = platform.os.name in ["nt", "dos"]
decoding = sys.getfilesystemencoding()
encoding = None if os.path.supports_unicode_filenames else decoding
if is_windows:
return WindowsPathEncoder(encoding, decoding)
else:
return UnixPathEncoder(encoding, decoding)
class UnixPathEncoder(Record("encoding", "decoding")):
def encode_path(self, path):
if self.encoding:
return path.encode(self.encoding)
else:
return path
def decode_path(self, path):
return path.decode(self.decoding)
class WindowsPathEncoder(Record("encoding", "decoding")):
def encode_path(self, path):
win_path = "\\\\?\\" + os.path.abspath(path.replace(PATH_SEP, os.sep))
if self.encoding:
return win_path.encode(self.encoding)
else:
return win_path
def decode_path(self, win_path):
return win_path.replace(os.sep, PATH_SEP).decode(self.decoding)
class FileSystem(Record("slog", "path_encoder")):
"""Encapsulates all of the operations we need on the FileSystem.
The most important part is probably listing/stating."""
READ_MODE = "rb"
NEW_WRITE_MODE = "wb"
EXISTING_WRITE_MODE = "r+b"
    # slog needs to have a path_error(message, error) method; it is used to
    # report paths that cannot be decoded (see list() below).
def __new__(cls, slog):
return cls.new(slog, PathEncoder())
def encode_path(fs, path):
return fs.path_encoder.encode_path(path)
def decode_path(fs, path):
return fs.path_encoder.decode_path(path)
def exists(fs, path):
encoded_path = fs.encode_path(path)
return os.path.exists(encoded_path)
def isdir(fs, path):
encoded_path = fs.encode_path(path)
return os.path.isdir(encoded_path)
def isfile(fs, path):
encoded_path = fs.encode_path(path)
return os.path.isfile(encoded_path)
def isempty(fs, path):
encoded_path = fs.encode_path(path)
        for _ in fs.list(path):
return False
return True
# yields FileStat, with same "root marker" rules as self.list(...)
#
# On my 2008 Macbook, reads about 10,000 files/sec when doing small
# groups (5,000 files), and 4,000 files/sec when doing large
# (200,000). These means it can take anywhere from .1 sec to 1
# minute. Cacheing seems to improve performance by about 30%.
# While running, the CPU is pegged :(. Oh well, 60,000 files in 8
# sec isn't too bad. That's my whole home directory.
#
# On my faster linux desktop machine, it's about 30,000 files/sec
# when cached, even for 200,00 files, which is a big improvement.
def list_stats(fs, root, root_marker = None, names_to_ignore = frozenset()):
return fs.stats(fs.list(
root, root_marker = root_marker, names_to_ignore = names_to_ignore))
    # yields a RootedPath for each file found in the root. The initial
# root is the given root. Deeper in, if there is a "root_marker"
# file in a directory, that directory becomes a new root.
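    # For example (hypothetical layout): with root_marker == ".psync" and a marker
    # file at <root>/sub/.psync, a file <root>/sub/a/b.txt is yielded as
    # RootedPath("<root>/sub", "a/b.txt") instead of RootedPath("<root>", "sub/a/b.txt").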
def list(fs, root, root_marker = None, names_to_ignore = frozenset()):
listdir = os.listdir
join = os.path.join
isdir = os.path.isdir
islink = os.path.islink
def decode(encoded_path):
try:
return fs.decode_path(encoded_path)
except Exception as err:
                fs.slog.path_error("Could not decode file path {0}"
                                   .format(repr(encoded_path)), err)
return None
# We pass root around so that we only have to decode it once.
def walk(root, encoded_root, encoded_parent):
child_names = listdir(encoded_parent)
if root_marker is not None:
if root_marker in child_names:
encoded_root = encoded_parent
root = decode(encoded_root)
# If decoding root fails, no point in traversing any futher.
if root is not None:
for child_name in child_names:
if child_name not in names_to_ignore:
encoded_full = join(encoded_parent, child_name)
if isdir(encoded_full):
if not islink(encoded_full):
for child in \
walk(root, encoded_root, encoded_full):
yield child
else:
rel = decode(encoded_full[len(encoded_root)+1:])
if rel:
yield RootedPath(root, rel)
encoded_root = fs.encode_path(root)
return walk(root, encoded_root, encoded_root)
# yields FileStats
def stats(fs, rpaths):
stat = os.stat
for rpath in rpaths:
try:
encoded_path = fs.encode_path(rpath.full)
stats = stat(encoded_path)
size = stats[STAT_SIZE_INDEX]
mtime = stats[STAT_MTIME_INDEX]
yield FileStat(rpath, size, mtime)
except OSError:
pass # Probably a link
# returns (size, mtime)
def stat(fs, path):
encoded_path = fs.encode_path(path)
stats = os.stat(encoded_path)
return stats[STAT_SIZE_INDEX], stats[STAT_MTIME_INDEX]
# Will not throw OSError for no path. Will return False in that case.
def stat_eq(fs, path, size, mtime):
try:
(current_size, current_mtime) = fs.stat(path)
return (current_size == size and
mtimes_eq(current_mtime, mtime))
except OSError:
return False
def read(fs, path, start = 0, size = None):
encoded_path = fs.encode_path(path)
        with open(encoded_path, fs.READ_MODE) as file:
            if start > 0:
file.seek(start, 0)
if size:
return file.read(size)
else:
return file.read()
# On my 2008 Macbook, with SHA1, it can hash 50,000 files
# totalling 145GB (about 3MB each file) in 48min, which is 17
# files totalling 50MB/sec. So, if you scan 30GB of new files, it
# will take 10min. During that time, CPU usage is ~80%.
def hash(fs, path, hash_type = hashlib.sha1, chunk_size = 100000):
if hash_type == None:
return ""
hasher = hash_type()
for chunk_data in fs._iter_chunks(path, chunk_size):
hasher.update(chunk_data)
return hasher.digest()
def _iter_chunks(fs, path, chunk_size):
encoded_path = fs.encode_path(path)
        with open(encoded_path, fs.READ_MODE) as file:
chunk = file.read(chunk_size)
while chunk:
yield chunk
chunk = file.read(chunk_size)
def write(fs, path, contents, start = None, mtime = None):
encoded_path = fs.encode_path(path)
fs.create_parent_dirs(path)
        if (start is not None) and fs.exists(path):
mode = fs.EXISTING_WRITE_MODE
else:
mode = fs.NEW_WRITE_MODE
with open(encoded_path, mode) as file:
if start is not None:
file.seek(start, 0)
assert start == file.tell(), \
"Failed to seek to proper location in file"
file.write(contents)
if mtime is not None:
            fs.touch(path, mtime)
def touch(fs, path, mtime):
encoded_path = fs.encode_path(path)
os.utime(encoded_path, (mtime, mtime))
def create_parent_dirs(fs, path):
fs.create_dir(parent_path(path))
def create_dir(fs, path):
encoded_path = fs.encode_path(path)
if not os.path.exists(encoded_path):
os.makedirs(encoded_path)
    # Blows up if existing stuff "in the way".
def move(fs, from_path, to_path, mtime = None):
encoded_from_path = fs.encode_path(from_path)
encoded_to_path = fs.encode_path(to_path)
fs.create_parent_dirs(to_path)
os.rename(encoded_from_path, encoded_to_path)
if mtime is not None:
fs.touch(to_path, mtime)
# Blows up if existing stuff "in the way".
def copy(fs, from_path, to_path, mtime = None):
encoded_from_path = fs.encode_path(from_path)
encoded_to_path = fs.encode_path(to_path)
fs.create_parent_dirs(to_path)
shutil.copyfile(encoded_from_path, encoded_to_path)
if mtime is not None:
fs.touch(to_path, mtime)
# Blows up if non-empy directory
def delete(fs, path):
encoded_path = fs.encode_path(path)
if os.path.exists(encoded_path):
os.remove(encoded_path)
def remove_empty_parent_dirs(fs, path):
encoded_parent_path = fs.encode_path(parent_path(path))
try:
os.removedirs(encoded_parent_path)
except OSError:
pass # Not empty
| bsd-3-clause | 971,168,239,324,412,800 | 35.764012 | 80 | 0.61478 | false | 3.889825 | false | false | false |
wesleywh/Scripts | metadata.py | 1 | 5246 | import os,sys #for system commands
import argparse #used for allowing command line switches
from stat import * #for stat command
import datetime #used for float to datetime
# import win32security
import ctypes as _ctypes #this is used to determine windows SID
from ctypes import wintypes as _wintypes
def convert(bytes, type):
    # Convert a byte count into a human-readable string.
    text = ""
    if bytes < 1024:
        number = bytes
        text = "BYTES"
    elif bytes < 1048576:
        number = bytes/1024
        text = "KB"
    elif bytes < 1073741824:
        number = bytes/1048576
        text = "MB"
    else:
        number = bytes/1073741824
        text = "GB"
    return str(round(number,2))+" "+text
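# Example (assuming an integer byte count): convert(2048, "MB") returns "2.0 KB";
# note that the second argument is currently ignored.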
def return_file_owners(file):
process = os.popen('Icacls '+"\""+file+"\"")
result = process.read()
process.close()
lines = result.split('\n')
for index, line in enumerate(lines):
if file in line:
line = line.split(file)[-1]
elif "Successfully processed 1 files;" in line:
line = ""
lines[index] = line.strip(" ")
lines = [x for x in lines if x]
return lines
def main():
#Available command line options
parser = argparse.ArgumentParser(description='Available Command Line Switches')
parser.add_argument('-F',metavar='F', nargs="+", help="Target File To Scan")
#all all available arguments to the 'args' variable
args = parser.parse_args()
for filePath in args.F:
try:
st = os.stat(os.path.abspath(filePath))
print("File Permissions","."*20,filemode(st.st_mode))
print("Size","."*32,convert(st.st_size, "MB"))
# #windows network all SIDs = wmic useraccount get name,sid
print("User ID","."*29,st.st_uid) #local windows SID = HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList
print("Group ID","."*28,st.st_gid)
if os.name == "nt":
owners = return_file_owners(os.path.abspath(filePath))
print("File Owner(s)","."*23,owners[0])
for index, owner in enumerate(owners):
if index != 0:
print(" "*37,owner)
print("Creation Time","."*23,datetime.datetime.fromtimestamp(st.st_ctime)) #windows = time of creation, unix = time of most recent metadata change
print("Last File Access","."*20,datetime.datetime.fromtimestamp(st.st_atime)) #time of most recent access
print("Last Mod Time","."*23,datetime.datetime.fromtimestamp(st.st_mtime)) #time of most recent content modification
print("Symbolic Link","."*23,S_ISLNK(st.st_mode)) #Return non-zero if the mode is from a symbolic link..
print("# of Locations on System","."*12,st.st_nlink) #number of hard links (number of locations in the file system)
print("Device","."*30,st.st_dev)
# print("st_mode:",st.st_mode) #protection bits
# print("st_ino:",st.st_ino) #inode number
# print("st_dev:",st.st_dev) #device
# print("is directory:",S_ISDIR(st.st_mode)) #is it a directory?
# print("Character Special Device:",S_ISCHR(st.st_mode)) #Return non-zero if the mode is from a character special device file.
# print("block special device file:",S_ISBLK(st.st_mode)) #Return non-zero if the mode is from a block special device file.
# print("Regular File:",S_ISREG(st.st_mode)) #Return non-zero if the mode is from a regular file.
# print("FIFO (named pipe):",S_ISFIFO(st.st_mode)) #Return non-zero if the mode is from a FIFO (named pipe).
# print("Is Socket:",S_ISSOCK(st.st_mode)) #Return non-zero if the mode is from a socket.
# print("Is Door:",S_ISDOOR(st.st_mode)) #Return non-zero if the mode is from a door.
# print("Event Port:",S_ISPORT(st.st_mode)) #Return non-zero if the mode is from an event port.
# print("whiteout:",S_ISWHT(st.st_mode)) #Return non-zero if the mode is from a whiteout.
# try:
# print("file’s permission bits:",S_IMODE(st.st_mode)) #Return the portion of the file’s mode that can be set by os.chmod()—that is, the file’s permission bits, plus the sticky bit, set-group-id, and set-user-id bits (on systems that support them).
# except:
# print("file's permission bits: Unable To Determine")
# print("file type:",S_IFMT(st.st_mode)) #Return the portion of the file’s mode that describes the file type (used by the S_IS*() functions above).
except IOError as e:
print ("I/O error({0}): {1}".format(e.errno, e.strerror))
except ValueError:
print ("Could not convert data to an integer.")
except:
print ("Unexpected error:", sys.exc_info()[0])
main() | mit | 1,616,514,697,141,397,200 | 54.934783 | 264 | 0.574293 | false | 3.901639 | false | false | false |
madeso/prettygood | dotnet/Seymor/Seymour/AddFeedDialog.Designer.py | 1 | 3812 | namespace Seymour
{
partial class AddFeedDialog
{
/// <summary>
/// Required designer variable.
/// </summary>
private System.ComponentModel.IContainer components = null;
/// <summary>
/// Clean up any resources being used.
/// </summary>
/// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
protected override void Dispose(bool disposing)
{
if (disposing && (components != null))
{
components.Dispose();
}
base.Dispose(disposing);
}
#region Windows Form Designer generated code
/// <summary>
/// Required method for Designer support - do not modify
/// the contents of this method with the code editor.
/// </summary>
private void InitializeComponent()
{
this.dCancel = new System.Windows.Forms.Button();
this.dOk = new System.Windows.Forms.Button();
this.dFeedUrl = new System.Windows.Forms.TextBox();
this.SuspendLayout();
//
// dCancel
//
this.dCancel.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Right)));
this.dCancel.DialogResult = System.Windows.Forms.DialogResult.Cancel;
this.dCancel.Location = new System.Drawing.Point(205, 38);
this.dCancel.Name = "dCancel";
this.dCancel.Size = new System.Drawing.Size(75, 23);
this.dCancel.TabIndex = 0;
this.dCancel.Text = "Cancel";
this.dCancel.UseVisualStyleBackColor = true;
this.dCancel.Click += new System.EventHandler(this.dCancel_Click);
//
// dOk
//
this.dOk.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Right)));
this.dOk.Location = new System.Drawing.Point(124, 38);
this.dOk.Name = "dOk";
this.dOk.Size = new System.Drawing.Size(75, 23);
this.dOk.TabIndex = 1;
this.dOk.Text = "OK";
this.dOk.UseVisualStyleBackColor = true;
this.dOk.Click += new System.EventHandler(this.dOk_Click);
//
// dFeedUrl
//
this.dFeedUrl.Anchor = ((System.Windows.Forms.AnchorStyles)(((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Left)
| System.Windows.Forms.AnchorStyles.Right)));
this.dFeedUrl.Location = new System.Drawing.Point(12, 12);
this.dFeedUrl.Name = "dFeedUrl";
this.dFeedUrl.Size = new System.Drawing.Size(268, 20);
this.dFeedUrl.TabIndex = 2;
//
// AddFeedDialog
//
this.AcceptButton = this.dOk;
this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
this.CancelButton = this.dCancel;
this.ClientSize = new System.Drawing.Size(292, 72);
this.Controls.Add(this.dFeedUrl);
this.Controls.Add(this.dOk);
this.Controls.Add(this.dCancel);
this.Name = "AddFeedDialog";
this.Text = "AddFeedDialog";
this.Load += new System.EventHandler(this.AddFeedDialog_Load);
this.ResumeLayout(false);
this.PerformLayout();
}
#endregion
private System.Windows.Forms.Button dCancel;
private System.Windows.Forms.Button dOk;
private System.Windows.Forms.TextBox dFeedUrl;
}
} | mit | 6,284,534,663,532,206,000 | 40.423913 | 155 | 0.575591 | false | 4.006309 | false | false | false |
LTLMoP/LTLMoP | src/lib/parseLP.py | 8 | 14969 | #!/usr/bin/env python
import math,re, os, random
import Polygon, Polygon.IO, Polygon.Utils
import project
from regions import *
import itertools
import decomposition
Polygon.setTolerance(0.1)
class parseLP:
"""
A parser to parse the locative prepositions in specification
"""
def __init__(self):
pass
def main(self,argv):
""" Main function; run automatically when called from command-line """
spec_file = argv
self.regionNear = []
self.regionBetween = []
defaultNearDistance = 50
# load data
self.proj = project.Project()
self.proj.setSilent(True)
self.proj.loadProject(spec_file)
if self.proj.compile_options['decompose']:
# we will do the decomposition
# Look for a defined boundary region, and set it aside if available
self.boundaryRegion = None
for region in self.proj.rfi.regions:
if region.name.lower() == 'boundary':
self.boundaryRegion = region
self.proj.rfi.regions.remove(region)
break
# TODO: If not defined, use the minimum bounding polygon by default
if self.boundaryRegion is None:
print "ERROR: You need to define a boundary region (just create a region named 'boundary' in RegionEditor)"
return
# turn list of string into one string
spec = "\n".join([line for line in self.proj.spec_data['SPECIFICATION']['Spec'] if not line.startswith("#")])
# get all regions that need to find "region near"
# the items in the list are tuple with region name and distance from the region boundary, default value is 50
for m in re.finditer(r'near (?P<rA>\w+)', spec):
if m.group("rA") not in self.regionNear:
self.regionNear.append((m.group("rA"),50))
# find "within distance from a region" is just special case of find "region near"
for m in re.finditer(r'within (?P<dist>\d+) (from|of) (?P<rA>\w+)', spec):
if m.group("rA") not in self.regionNear:
self.regionNear.append((m.group("rA"),int(m.group("dist"))))
# get all regions that need to find "region between"
# the items in the list are tuple with two region names
for m in re.finditer(r'between (?P<rA>\w+) and (?P<rB>\w+)', spec):
if (m.group("rA"),m.group("rB")) not in self.regionBetween and (m.group("rB"),m.group("rA")) not in self.regionBetween:
self.regionBetween.append((m.group("rA"),m.group("rB")))
# generate new regions
self.generateNewRegion()
            # break the overlapped regions into separated parts
self.checkOverLapping()
# remove small regions
self.removeSmallRegions()
# decompose any regions with holes or are concave
if self.proj.compile_options['convexify']:
self.decomp()
# store the regionMapping data to project file
self.proj.regionMapping = self.newPolysMap
# save the regions into new region file
fileName = self.proj.getFilenamePrefix()+'_decomposed.regions'
self.saveRegions(fileName)
else:
# if decompose option is disabled, we skip the following step but keep the mapping
self.newPolysMap = {} # {"nameOfRegion":a list holds name of portion}
for region in self.proj.rfi.regions:
self.newPolysMap[region.name] = [region.name]
# store the regionMapping data to project file
self.proj.regionMapping = self.newPolysMap
fileName = self.proj.getFilenamePrefix()+'_decomposed.regions'
self.proj.rfi.writeFile(fileName)
def generateNewRegion(self):
"""
Generate new regions for locative prepositions
"""
# regions related with "near/within" preposition
for (regionName,dist) in self.regionNear:
for region in self.proj.rfi.regions:
if region.name == regionName:
oldRegion = region
newRegion = oldRegion.findRegionNear(dist,mode="overEstimate",name='near$'+regionName+'$'+str(dist))
self.proj.rfi.regions.append(newRegion)
# regions related with "between" preposition
for (regionNameA,regionNameB) in self.regionBetween:
for region in self.proj.rfi.regions:
if region.name == regionNameA:
regionA = region
elif region.name == regionNameB:
regionB = region
newRegion = findRegionBetween(regionA,regionB,name='between$'+regionNameA+'$and$'+regionNameB+"$")
self.proj.rfi.regions.append(newRegion)
def checkOverLapping(self):
"""
        Check if any regions overlap each other
Break the ones that overlap into portions that don't overlap
"""
oldRegionNames=[]
self.oldPolys = {} # {"nameOfRegion":polygon of that region}
self.newPolysMap = {} # {"nameOfRegion":a list holds name of portion}
self.portionOfRegion = {} # {"nameOfPortion":polygon of that portion}
for region in self.proj.rfi.regions:
points = [(pt.x,pt.y) for pt in region.getPoints()]
poly = Polygon.Polygon(points)
self.oldPolys[region.name] = self.intAllPoints(poly)
self.newPolysMap[region.name] = []
oldRegionNames = sorted(self.oldPolys.keys())
        self.newPolysMap['others'] = [] # parts outside of all regions
        # set up an iterator of lists of boolean values (0/1) for finding overlapping regions
        # each item corresponds to one possible overlap
        # each boolean value corresponds to one region
boolList = itertools.product([0,1],repeat=len(oldRegionNames))
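        # e.g. for two regions [rA, rB] the iterator yields (0,0), (0,1), (1,0), (1,1):
        # (1,1) is the overlap rA & rB, (1,0) is rA minus rB, (0,1) is rB minus rA,
        # and (0,0) is the part of the boundary outside both (mapped to 'others')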
self.count = 1 # for naming the portion
# break the overlapping regions
for expr in boolList:
tempRegionList = []
result = self.intAllPoints(Polygon.Polygon([(pt.x,pt.y) for pt in self.boundaryRegion.getPoints()])) # starts with the boundary region
for i,item in enumerate(expr):
if item == 1:
# when the region is included
result = result & self.oldPolys[oldRegionNames[i]]
tempRegionList.append(oldRegionNames[i])
else:
# when the region is excluded
result = result - self.oldPolys[oldRegionNames[i]]
if result.nPoints()>0:
# there is a portion of region left
holeList = []
nonHoleList = []
for i,contour in enumerate(result):
if not result.isHole(i):
nonHoleList.append(Polygon.Polygon(result[i]))
else:
holeList.append(Polygon.Polygon(result[i]))
for nonHolePoly in nonHoleList:
polyWithoutOverlapNode = self.decomposeWithOverlappingPoint(nonHolePoly)
for poly in polyWithoutOverlapNode:
portionName = 'p'+str(self.count)
p = self.intAllPoints(poly)
for hole in holeList:
p = p - self.intAllPoints(hole)
self.portionOfRegion[portionName] = p
if len(tempRegionList) == 0:
self.newPolysMap['others'].append(portionName)
else:
for regionName in tempRegionList:
# update the maping dictionary
self.newPolysMap[regionName].append(portionName)
self.count = self.count + 1
def decomposeWithOverlappingPoint(self,polygon):
"""
When there are points overlapping each other in a given polygon
First decompose this polygon into sub-polygons at the overlapping point
"""
# recursively break the polygon at any overlap point into two polygons until no overlap points are found
# here we are sure there is only one contour in the given polygon
ptDic = {}
overlapPtIndex = None
# look for overlap point and stop when one is found
for i,pt in enumerate(polygon[0]):
if pt not in ptDic:
ptDic[pt]=[i]
else:
ptDic[pt].append(i)
overlapPtIndex = ptDic[pt]
break
if overlapPtIndex:
polyWithoutOverlapNode = []
# break the polygon into sub-polygons
newPoly = Polygon.Polygon(polygon[0][overlapPtIndex[0]:overlapPtIndex[1]])
polyWithoutOverlapNode.extend(self.decomposeWithOverlappingPoint(newPoly))
reducedPoly = Polygon.Polygon(decomposition.removeDuplicatePoints((polygon-newPoly)[0]))
polyWithoutOverlapNode.extend(self.decomposeWithOverlappingPoint(reducedPoly))
else:
# no overlap point is found
return [polygon]
return polyWithoutOverlapNode
def decomp(self):
"""
        Decompose the regions that have holes or are concave
"""
tempDic = {} # temporary variable for storing polygon
# will be merged at the end to self.portionOfRegion
for nameOfPortion,poly in self.portionOfRegion.iteritems():
result = [] # result list of polygon from decomposition
if len(poly)>1:
# the polygon contains holes
holes = [] # list holds polygon stands for holes
for i,contour in enumerate(poly):
if poly.isHole(i):
holes.append(Polygon.Polygon(poly[i]))
else:
newPoly = Polygon.Polygon(poly[i])
de = decomposition.decomposition(newPoly,holes)
result = de.MP5()
else:
# if the polygon doesn't have any hole, decompose it if it is concave,
# nothing will be done if it is convex
de = decomposition.decomposition(poly)
result = de.MP5()
if len(result)>1:
# the region is decomposed to smaller parts
newPortionName=[]
# add the new portions
for item in result:
portionName = 'p'+str(self.count)
newPortionName.append(portionName)
tempDic[portionName] = item
self.count = self.count + 1
# update the mapping dictionary
for nameOfRegion,portionList in self.newPolysMap.iteritems():
if nameOfPortion in portionList:
self.newPolysMap[nameOfRegion].remove(nameOfPortion)
self.newPolysMap[nameOfRegion].extend(newPortionName)
else:
tempDic[nameOfPortion] = Polygon.Polygon(result[0])
self.portionOfRegion = tempDic
def drawAllPortions(self):
"""
Output a drawing of all the polygons that stored in self.portionOfRegion, for debug purpose
"""
if len(self.portionOfRegion)==0:
print "There is no polygon stored."
print
return
polyList = []
for nameOfPortion,poly in self.portionOfRegion.iteritems():
polyList.append(poly)
Polygon.IO.writeSVG('/home/cornell/Desktop/ltlmop-google/allPortions.svg', polyList)
def removeSmallRegions(self):
"""
        A function to remove small regions
"""
tolerance=0.0000001
# find the area of largest regions
area = 0
for nameOfPortion,poly in self.portionOfRegion.iteritems():
if area<poly.area():
area = poly.area()
# remove small regions
smallRegion = []
for nameOfPortion,poly in self.portionOfRegion.iteritems():
if poly.area()<tolerance*area:
smallRegion.append(nameOfPortion)
                for nameOfRegion, portionList in self.newPolysMap.iteritems():
                    if nameOfPortion in portionList:
                        self.newPolysMap[nameOfRegion].remove(nameOfPortion)
for region in smallRegion:
#print "remove"+region
del self.portionOfRegion[region]
def intAllPoints(self,poly):
"""
Function that turn all point coordinates into integer
Return a new polygon
"""
return Polygon.Utils.prunePoints(Polygon.Polygon([(int(pt[0]),int(pt[1])) for pt in poly[0]]))
def saveRegions(self, fileName=''):
"""
Save the region data into a new region file
"""
# use the existing rfi as to start
# the only different data is regions
self.proj.rfi.regions = []
for nameOfPortion,poly in self.portionOfRegion.iteritems():
newRegion = Region()
newRegion.name = nameOfPortion
newRegion.color = Color()
newRegion.color.SetFromName(random.choice(['RED','ORANGE','YELLOW','GREEN','BLUE','PURPLE']))
for i,ct in enumerate(poly):
if poly.isHole(i):
newRegion.holeList.append([Point(*x) for x in Polygon.Utils.pointList(Polygon.Polygon(poly[i]))])
else:
newRegion.pointArray = [Point(*x) for x in Polygon.Utils.pointList(Polygon.Polygon(poly[i]))]
newRegion.alignmentPoints = [False] * len([x for x in newRegion.getPoints()])
newRegion.recalcBoundingBox()
if newRegion.getDirection() == dir_CCW:
newRegion.pointArray.reverse()
self.proj.rfi.regions.append(newRegion)
# Giant loop!
for obj1 in self.proj.rfi.regions:
for obj2 in self.proj.rfi.regions:
self.proj.rfi.splitSubfaces(obj1, obj2)
self.proj.rfi.recalcAdjacency()
self.proj.rfi.writeFile(fileName)
| gpl-3.0 | 8,325,122,493,284,336,000 | 42.514535 | 146 | 0.553277 | false | 4.485766 | false | false | false |
wy65701436/harbor | tests/apitests/python/library/project.py | 2 | 10378 | # -*- coding: utf-8 -*-
import v2_swagger_client
from library.base import _assert_status_code
from v2_swagger_client.models.role_request import RoleRequest
from v2_swagger_client.rest import ApiException
import base
def is_member_exist_in_project(members, member_user_name, expected_member_role_id = None):
result = False
for member in members:
if member.entity_name == member_user_name:
if expected_member_role_id != None:
if member.role_id == expected_member_role_id:
return True
else:
return True
return result
def get_member_id_by_name(members, member_user_name):
for member in members:
if member.entity_name == member_user_name:
return member.id
return None
class Project(base.Base):
def __init__(self, username=None, password=None):
kwargs = dict(api_type="projectv2")
if username and password:
kwargs["credential"] = base.Credential('basic_auth', username, password)
super(Project, self).__init__(**kwargs)
def create_project(self, name=None, registry_id=None, metadata=None, expect_status_code = 201, expect_response_body = None, **kwargs):
if name is None:
name = base._random_name("project")
if metadata is None:
metadata = {}
if registry_id is None:
registry_id = registry_id
project = v2_swagger_client.ProjectReq(project_name=name, registry_id = registry_id, metadata=metadata)
try:
_, status_code, header = self._get_client(**kwargs).create_project_with_http_info(project)
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
if expect_response_body is not None:
base._assert_status_body(expect_response_body, e.body)
return
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(201, status_code)
return base._get_id_from_header(header), name
def get_projects(self, params, **kwargs):
data = []
data, status_code, _ = self._get_client(**kwargs).list_projects_with_http_info(**params)
base._assert_status_code(200, status_code)
return data
def get_project_id(self, project_name, **kwargs):
project_data = self.get_projects(dict(), **kwargs)
actual_count = len(project_data)
if actual_count == 1 and str(project_data[0].project_name) != str(project_name):
return project_data[0].project_id
else:
return None
def projects_should_exist(self, params, expected_count = None, expected_project_id = None, **kwargs):
project_data = self.get_projects(params, **kwargs)
actual_count = len(project_data)
if expected_count is not None and actual_count!= expected_count:
raise Exception(r"Private project count should be {}.".format(expected_count))
if expected_project_id is not None and actual_count == 1 and str(project_data[0].project_id) != str(expected_project_id):
raise Exception(r"Project-id check failed, expect {} but got {}, please check this test case.".format(str(expected_project_id), str(project_data[0].project_id)))
def check_project_name_exist(self, name=None, **kwargs):
try:
_, status_code, _ = self._get_client(**kwargs).head_project_with_http_info(name)
except ApiException as e:
status_code = -1
return {
200: True,
404: False,
}.get(status_code,False)
def get_project(self, project_id, expect_status_code = 200, expect_response_body = None, **kwargs):
try:
data, status_code, _ = self._get_client(**kwargs).get_project_with_http_info(project_id)
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
if expect_response_body is not None:
base._assert_status_body(expect_response_body, e.body)
return
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
print("Project {} info: {}".format(project_id, data))
return data
def update_project(self, project_id, expect_status_code=200, metadata=None, cve_allowlist=None, **kwargs):
project = v2_swagger_client.ProjectReq(metadata=metadata, cve_allowlist=cve_allowlist)
try:
_, sc, _ = self._get_client(**kwargs).update_project_with_http_info(project_id, project)
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
else:
base._assert_status_code(expect_status_code, sc)
def delete_project(self, project_id, expect_status_code = 200, **kwargs):
_, status_code, _ = self._get_client(**kwargs).delete_project_with_http_info(project_id)
base._assert_status_code(expect_status_code, status_code)
def get_project_log(self, project_name, expect_status_code = 200, **kwargs):
body, status_code, _ = self._get_client(**kwargs).get_logs_with_http_info(project_name)
base._assert_status_code(expect_status_code, status_code)
return body
def filter_project_logs(self, project_name, operator, resource, resource_type, operation, **kwargs):
access_logs = self.get_project_log(project_name, **kwargs)
count = 0
for each_access_log in list(access_logs):
if each_access_log.username == operator and \
each_access_log.resource_type == resource_type and \
each_access_log.resource == resource and \
each_access_log.operation == operation:
count = count + 1
return count
def get_project_members(self, project_id, **kwargs):
kwargs['api_type'] = 'member'
return self._get_client(**kwargs).list_project_members(project_id)
def get_project_member(self, project_id, member_id, expect_status_code = 200, expect_response_body = None, **kwargs):
from swagger_client.rest import ApiException
kwargs['api_type'] = 'member'
data = []
try:
data, status_code, _ = self._get_client(**kwargs).get_project_member_with_http_info(project_id, member_id,)
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
if expect_response_body is not None:
base._assert_status_body(expect_response_body, e.body)
return
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
return data
def get_project_member_id(self, project_id, member_user_name, **kwargs):
kwargs['api_type'] = 'member'
members = self.get_project_members(project_id, **kwargs)
result = get_member_id_by_name(list(members), member_user_name)
if result == None:
raise Exception(r"Failed to get member id of member {} in project {}.".format(member_user_name, project_id))
else:
return result
def check_project_member_not_exist(self, project_id, member_user_name, **kwargs):
kwargs['api_type'] = 'member'
members = self.get_project_members(project_id, **kwargs)
result = is_member_exist_in_project(list(members), member_user_name)
if result == True:
raise Exception(r"User {} should not be a member of project with ID {}.".format(member_user_name, project_id))
def check_project_members_exist(self, project_id, member_user_name, expected_member_role_id = None, **kwargs):
kwargs['api_type'] = 'member'
members = self.get_project_members(project_id, **kwargs)
result = is_member_exist_in_project(members, member_user_name, expected_member_role_id = expected_member_role_id)
if result == False:
raise Exception(r"User {} should be a member of project with ID {}.".format(member_user_name, project_id))
def update_project_member_role(self, project_id, member_id, member_role_id, expect_status_code = 200, **kwargs):
kwargs['api_type'] = 'member'
role = RoleRequest(role_id = member_role_id)
data, status_code, _ = self._get_client(**kwargs).update_project_member_with_http_info(project_id, member_id, role = role)
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
return data
def delete_project_member(self, project_id, member_id, expect_status_code = 200, **kwargs):
kwargs['api_type'] = 'member'
_, status_code, _ = self._get_client(**kwargs).delete_project_member_with_http_info(project_id, member_id)
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
def add_project_members(self, project_id, user_id = None, member_role_id = None, _ldap_group_dn=None, expect_status_code = 201, **kwargs):
kwargs['api_type'] = 'member'
projectMember = v2_swagger_client.ProjectMember()
if user_id is not None:
projectMember.member_user = {"user_id": int(user_id)}
if member_role_id is None:
projectMember.role_id = 1
else:
projectMember.role_id = member_role_id
if _ldap_group_dn is not None:
projectMember.member_group = v2_swagger_client.UserGroup(ldap_group_dn=_ldap_group_dn)
data = []
try:
data, status_code, header = self._get_client(**kwargs).create_project_member_with_http_info(project_id, project_member = projectMember)
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
else:
base._assert_status_code(expect_status_code, status_code)
return base._get_id_from_header(header)
def query_user_logs(self, project_name, status_code=200, **kwargs):
try:
logs = self.get_project_log(project_name, expect_status_code=status_code, **kwargs)
count = 0
for log in list(logs):
count = count + 1
return count
except ApiException as e:
_assert_status_code(status_code, e.status)
return 0 | apache-2.0 | -2,864,483,311,539,250,000 | 47.050926 | 173 | 0.628059 | false | 3.706429 | false | false | false |
moria/zulip | zerver/management/commands/gravatar_to_user_avatar.py | 124 | 2043 | from __future__ import absolute_import
import requests
from zerver.models import get_user_profile_by_email, UserProfile
from zerver.lib.avatar import gravatar_hash
from zerver.lib.upload import upload_avatar_image
from django.core.management.base import BaseCommand, CommandError
from django.core.files.uploadedfile import SimpleUploadedFile
class Command(BaseCommand):
help = """Migrate the specified user's Gravatar over to an avatar that we serve. If two
email addresses are specified, use the Gravatar for the first and upload the image
for both email addresses."""
def add_arguments(self, parser):
parser.add_argument('old_email', metavar='<old email>', type=str,
help="user whose Gravatar should be migrated")
parser.add_argument('new_email', metavar='<new email>', type=str, nargs='?', default=None,
help="user to copy the Gravatar to")
def handle(self, *args, **options):
old_email = options['old_email']
if options['new_email']:
new_email = options['new_email']
else:
new_email = old_email
gravatar_url = "https://secure.gravatar.com/avatar/%s?d=identicon" % (gravatar_hash(old_email),)
gravatar_data = requests.get(gravatar_url).content
gravatar_file = SimpleUploadedFile('gravatar.jpg', gravatar_data, 'image/jpeg')
try:
user_profile = get_user_profile_by_email(old_email)
except UserProfile.DoesNotExist:
try:
user_profile = get_user_profile_by_email(new_email)
except UserProfile.DoesNotExist:
raise CommandError("Could not find specified user")
upload_avatar_image(gravatar_file, user_profile, old_email)
if old_email != new_email:
gravatar_file.seek(0)
upload_avatar_image(gravatar_file, user_profile, new_email)
user_profile.avatar_source = UserProfile.AVATAR_FROM_USER
user_profile.save(update_fields=['avatar_source'])
| apache-2.0 | -7,385,521,084,850,490,000 | 42.468085 | 104 | 0.660793 | false | 4.086 | false | false | false |
noba3/KoTos | addons/plugin.video.hubwizard/default.py | 1 | 9230 | # Config Wizard By: Blazetamer 2013-2014
# Thanks to Blazetamer, TheHighway, and the rest of the crew at TVADDONS.ag (XBMCHUB.com).
import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,os,sys,downloader,extract,time,shutil
import wizardmain as main
AddonTitle='Config Wizard'; wizardUrl='http://tribeca.tvaddons.ag/tools/wizard/';
SiteDomain='TVADDONS.AG'; TeamName='TEAM TVADDONS';
addon=main.addon; net=main.net; settings=main.settings;
SkinBackGroundImg=os.path.join('special://','home','media','SKINDEFAULT.jpg')
RequiredHostsPath=xbmc.translatePath(os.path.join(main.AddonPath,'requiredhosts.py'))
RequiredHostsUrl=wizardUrl+'requiredhosts.txt'
RequiredHostsUrl='https://offshoregit.com/xbmchub/config-wizard-development/raw/master/requiredhosts.py'
LinksUrl=wizardUrl+'links.txt'
#LinksUrl='https://offshoregit.com/xbmchub/config-wizard-development/raw/master/links.txt'
LocalLinks=xbmc.translatePath(os.path.join(main.AddonPath,'links.txt'))
#==========================Help WIZARD=====================================================================================================
def HELPCATEGORIES():
if ((XBMCversion['Ver'] in ['','']) or (int(XBMCversion['two']) < 12)) and (settings.getSetting('bypass-xbmcversion')=='false'):
eod(); addon.show_ok_dialog(["Compatibility Issue: Outdated Kodi Setup","Please upgrade to a newer version of XBMC first!","Visit %s for Support!"%SiteDomain],title="XBMC "+XBMCversion['Ver'],is_error=False); DoA('Back');
else:
if main.isFile(LocalLinks)==True: link=main.nolines(main.FileOpen(LocalLinks))
else: link=main.OPEN_URL(LinksUrl).replace('\n','').replace('\r','').replace('\a','')
match=re.compile('name="(.+?)".+?rl="(.+?)".+?mg="(.+?)".+?anart="(.+?)".+?escription="(.+?)".+?ype="(.+?)"').findall(link)
for name,url,iconimage,fanart,description,filetype in match:
#if 'status' in filetype:
#main.addHELPDir(name,url,'wizardstatus',iconimage,fanart,description,filetype)
#else:
main.addHELPDir(name,url,'helpwizard',iconimage,fanart,description,filetype)
CustomUrl=settings.getSetting('custom-url')
try:
if (len(CustomUrl) > 10) and ('://' in CustomUrl):
main.addHELPDir('Custom Url[CR](Addon Settings)',CustomUrl,'helpwizard',main.AddonIcon,main.AddonFanart,"Custom url found in addon settings.","main") ## For Testing to test a url with a FileHost.
except: pass
#main.addHELPDir('Testing','http://www.firedrive.com/file/################','helpwizard',iconimage,fanart,description,filetype) ## For Testing to test a url with a FileHost.
main.AUTO_VIEW('movies')
## ### ##
def xEBb(t): main.xEB('Skin.SetBool(%s)'%t)
def xEBS(t,n): main.xEB('Skin.SetString(%s,%s)'%(t,n))
def HELPWIZARD(name,url,description,filetype):
path=xbmc.translatePath(os.path.join('special://home','addons','packages')); confirm=xbmcgui.Dialog(); filetype=filetype.lower();
if filetype=='splash':
try: html=main.OPEN_URL(url)
except: return
import splash_highway as splash
SplashBH=xbmc.translatePath(os.path.join(main.AddonPath,'ContentPanel.png'))
ExitBH=xbmc.translatePath(os.path.join(main.AddonPath,'Exit.png'))
splash.do_My_TextSplash2(html,SplashBH,12,TxtColor='0xff00bfff',Font='font12',BorderWidth=40,ImgexitBtn=ExitBH,colorDiffuse='0xff00bfff');
return
if confirm.yesno(TeamName,"Would you like %s to "%SiteDomain,"customize your add-on selection? "," "):
dp=xbmcgui.DialogProgress(); dp.create(AddonTitle,"Downloading ",'','Please Wait')
lib=os.path.join(path,name+'.zip')
try: os.remove(lib)
except: pass
### ## ... ##
#try:
# if (main.isFile(LocalLinks)==False) or (main.isFile(RequiredHostsPath)==False): FHTML=main.OPEN_URL(RequiredHostsUrl); main.FileSave(RequiredHostsPath,FHTML); time.sleep(2)
#except: pass
if main.isFile(RequiredHostsPath)==False: dialog=xbmcgui.Dialog(); dialog.ok("Error!",'import not found.'); return
try: import requiredhosts as RequiredHosts
except: print "error attempting to import requiredhosts as RequiredHosts"; dialog=xbmcgui.Dialog(); dialog.ok("Error!","import failed."); return
#print {'url':url}
url=RequiredHosts.CheckForHosts(url); #print {'url':url}
### ## ... ##
if str(url).endswith('[error]'): print url; dialog=xbmcgui.Dialog(); dialog.ok("Error!",url); return
if '[error]' in url: print url; dialog=xbmcgui.Dialog(); dialog.ok("Error!",url); return
if not str(url).lower().startswith('http://'): print url; dialog=xbmcgui.Dialog(); dialog.ok("Error!",url); return
print {'url':url}
downloader.download(url,lib,dp)
### ## ... ##
#return ## For Testing 2 Black Overwrite of stuff. ##
### ## ... ##
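        # 'main' archives are extracted over the whole Kodi home folder; 'addon' archives only into the addons folder.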
if filetype=='main': addonfolder=xbmc.translatePath('special://home')
elif filetype=='addon': addonfolder=xbmc.translatePath(os.path.join('special://home','addons'))
else: print {'filetype':filetype}; dialog=xbmcgui.Dialog(); dialog.ok("Error!",'filetype: "%s"'%str(filetype)); return
#time.sleep(2)
xbmc.sleep(4000)
dp.update(0,"","Extracting Zip Please Wait")
print '======================================='; print addonfolder; print '======================================='
extract.all(lib,addonfolder,dp)
proname=xbmc.getInfoLabel("System.ProfileName")
if (filetype=='main') and (settings.getSetting('homescreen-shortcuts')=='true'):
link=main.OPEN_URL(wizardUrl+'shortcuts.txt')
shorts=re.compile('shortcut="(.+?)"').findall(link)
for shortname in shorts: main.xEB('Skin.SetString(%s)'%shortname)
if (filetype=='main') and (settings.getSetting('other-skin-settings')=='true'):
#main.xEB('Skin.SetString(CustomBackgroundPath,%s)' %img)
#main.xEB('Skin.SetBool(ShowBackgroundVideo)') ## Set to true so we can later set them to false.
#main.xEB('Skin.SetBool(ShowBackgroundVis)') ## Set to true so we can later set them to false.
#main.xEB('Skin.ToggleSetting(ShowBackgroundVideo)') ## Switching from true to false.
#main.xEB('Skin.ToggleSetting(ShowBackgroundVis)') ## Switching from true to false.
xEBb('HideBackGroundFanart')
xEBb('HideVisualizationFanart')
xEBb('AutoScroll')
if (filetype=='main') and (main.isFile(xbmc.translatePath(SkinBackGroundImg))==True):
xEBS('CustomBackgroundPath',SkinBackGroundImg)
xEBb('UseCustomBackground')
#time.sleep(2)
xbmc.sleep(4000)
xbmc.executebuiltin('UnloadSkin()'); xbmc.executebuiltin('ReloadSkin()'); xbmc.executebuiltin("LoadProfile(%s)" % proname)
dialog=xbmcgui.Dialog(); dialog.ok("Success!","Installation Complete"," [COLOR gold]Brought To You By %s[/COLOR]"%SiteDomain)
##
#==========
def DoA(a): xbmc.executebuiltin("Action(%s)" % a) #DoA('Back'); # to move to previous screen.
def eod(): addon.end_of_directory()
#==========OS Type & XBMC Version===========================================================================================
XBMCversion={}; XBMCversion['All']=xbmc.getInfoLabel("System.BuildVersion"); XBMCversion['Ver']=XBMCversion['All']; XBMCversion['Release']=''; XBMCversion['Date']='';
if ('Git:' in XBMCversion['All']) and ('-' in XBMCversion['All']): XBMCversion['Date']=XBMCversion['All'].split('Git:')[1].split('-')[0]
if ' ' in XBMCversion['Ver']: XBMCversion['Ver']=XBMCversion['Ver'].split(' ')[0]
if '-' in XBMCversion['Ver']: XBMCversion['Release']=XBMCversion['Ver'].split('-')[1]; XBMCversion['Ver']=XBMCversion['Ver'].split('-')[0]
if len(XBMCversion['Ver']) > 1: XBMCversion['two']=str(XBMCversion['Ver'][0])+str(XBMCversion['Ver'][1])
else: XBMCversion['two']='00'
if len(XBMCversion['Ver']) > 3: XBMCversion['three']=str(XBMCversion['Ver'][0])+str(XBMCversion['Ver'][1])+str(XBMCversion['Ver'][3])
else: XBMCversion['three']='000'
sOS=str(main.get_xbmc_os());
print [['Version All',XBMCversion['All']],['Version Number',XBMCversion['Ver']],['Version Release Name',XBMCversion['Release']],['Version Date',XBMCversion['Date']],['OS',sOS]]
#==========END HELP WIZARD==================================================================================================
params=main.get_params(); url=None; name=None; mode=None; year=None; imdb_id=None
def ParsUQP(s,Default=None):
try: return urllib.unquote_plus(params[s])
except: return Default
fanart=ParsUQP("fanart",""); description=ParsUQP("description",""); filetype=ParsUQP("filetype",""); url=ParsUQP("url",""); name=ParsUQP("name",""); mode=ParsUQP("mode"); year=ParsUQP("year");
print "Mode: "+str(mode); print "URL: "+str(url); print "Name: "+str(name); print "Year: "+str(year)
if mode==None or url==None or len(url)<1: HELPCATEGORIES()
elif mode=="wizardstatus": print""+url; items=main.WIZARDSTATUS(url)
elif mode=='helpwizard': HELPWIZARD(name,url,description,filetype)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 | -8,947,119,783,127,450,000 | 72.84 | 230 | 0.631203 | false | 3.458224 | true | false | false |
baldengineers/KickstarterBot | imagegen.py | 1 | 5608 | import random
import pickle
from sklearn import tree
from pprint import pprint
from PIL import Image
##choices = ["█", " ", "▓", "▒", "░"]
choices = ["██", " "]
WIDTH = 8
def clear(p):
if p:
name = 'training.dat'
with open(name, 'rb') as f:
l = pickle.load(f)
        l[1] = l[1][:-1]
with open(name, 'wb') as f:
pickle.dump(l,f)
else:
with open("training.dat", "wb") as f:
pickle.dump([0,[]],f)
def run(width):
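    # Build a random sprite whose left and right halves mirror each other, print it and return its string encoding.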
total = []
for i in range(width):
total.append([])
for j in range(width):
total[i].append("")
if width % 2 != 0:
for i in range(width):
f = choices[random.randint(0,len(choices)-1)]
total[i][int((width-1)/2)] = f
for i in range(width):
if width % 2 != 0:
for j in range(int((width-1)/2)):
x = choices[random.randint(0,len(choices)-1)]
total[i][j] = x
total[i][width-1-j] = x
else:
for j in range(int(width/2)):
x = choices[random.randint(0,len(choices)-1)]
total[i][j] = x
total[i][width-j-1] = x
for l in total:
strng = ""
for sl in l:
strng += sl
print(strng)
return sprite_to_num(total)
def like(t):
#whether you like the image or not
name = 'training.dat'
with open(name, 'rb') as f:
l = pickle.load(f)
ans = input("te gusta hombre? (y/n)\n")
if ans == "y":
#print('appending to yes list:', t)
l[1].append([t, 1]) # tell computer you like the image
im = Image.new("RGB", (WIDTH, WIDTH))
pix = im.load()
for x in range(WIDTH):
for y in range(WIDTH):
if t[y][x] == "0": #0 means black
pix[x,y] = (0,0,0)
else:
pix[x,y] = (255,255,255)
im.save("sprites/%d.png" % l[0], "PNG")
l[0] += 1
elif ans == "n":
#print('appending to no list:', t)
l[1].append([t, 0]) # tell computer you do not like the image
#print(l)
else:
return
with open(name, 'wb') as f:
pickle.dump(l,f)
def sprite_to_num(sprite):
#converts sprite into a readable format for sklearn
for i, row in enumerate(sprite):
s = ""
for j, char in enumerate(row): #char is the individual items in each row
s += str(choices.index(char))
sprite[i] = s
return sprite
def learn(width):
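    # Fit a decision tree on previously rated sprites (if any), generate a fresh sprite and report whether the model predicts it will be liked.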
name = 'training.dat'
with open(name, 'rb') as f:
l = pickle.load(f)
l = l[1]
if l == []:
#pass
return run(width)
features = []
labels = []
## for sprite in l:
## for i, row in enumerate(sprite):
## for j, s in enumerate(row): #s is the individual items in each row
## #-1 means there is no character adjacenct to the current character
## up = choices.index(sprite[i-1][j]) if i != 0 else -1 #the item above the current
## down = choices.index(sprite[i+1][j]) if i != width - 1 else -1
## left = choices.index(sprite[i][j-1]) if j != 0 else -1
## right = choices.index(sprite[i][j+1]) if j != width - 1 else -1
##
## #features.append([up, down, left, right, i, j])
## features.append([up, left, i, j]) #only up and left because down and right haven't been generated yet
## labels.append(choices.index(s))
## #print(up, down, left, right)
for sprite in l:
## pprint(sprite[0])
## s = sprite_to_num(sprite[0])
features.append(sprite[0])
labels.append(sprite[1])
clf = tree.DecisionTreeClassifier()
clf = clf.fit(features, labels)
#random indices to create a fixed char (in order to randomize results)
#fixed_i, fixed_j = random.randint(0, width-1), random.randint(0, width-1)
#total[fixed_i][fixed_j] = choices[random.randint(0, len(choices)-1)]
## if width % 2 != 0:
## for i in range(width):
## f = choices[random.randint(0,len(choices)-1)]
## total[i][int((width-1)/2)] = f
##
##
## for i in range(width):
## if width % 2 != 0:
## for j in range(int((width-1)/2)):
## x = choices[random.randint(0,len(choices)-1)]
## total[i][j] = x
## total[i][width-1-j] = x
## else:
## for i in range(width):
## for j in range(width):
## #if i == fixed_i and j == fixed_j:
## # continue
## up = choices.index(total[i-1][j]) if i != 0 else -1 #the item above the current
## #down = choices.index(total[i+1][j]) if i != width - 1 else -1
## left = choices.index(total[i][j-1]) if j != 0 else -1
## #right = choices.index(total[i][j+1]) if j != width - 1 else -1
## x = clf.predict([[up, down, left, right, i, j]])[0]
## x = clf.predict([[up, left, i, j]])[0]
total = run(width)
#t = sprite_to_num(total)
## print('total: ')
## pprint(total)
x = clf.predict([total])
if x:
print("Computer says YES: ")
pprint(total)
else:
print("Computer says NO: ")
pprint(total)
return total
#print(clf.predict())
#clear(0) #1 if remove last one, 0 if all
while True:
#like(run(8))
like(learn(WIDTH))
| mit | 205,228,781,841,308,060 | 29.248649 | 119 | 0.490172 | false | 3.284038 | false | false | false |
rewardz/django_model_helpers | tests/simple_app/simple_app/settings.py | 1 | 2611 | """
Django settings for simple_app project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xj#n!k9!7lgce4yem@h9g%jpg_cg&4!&_eh6gknic_b%e$yndk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'sample'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'simple_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'simple_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'cache',
'TIMEOUT': 3 # Every 3 seconds
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
    'loggers': {
        '': {
            'handlers': [],
            'level': 'DEBUG',
            'propagate': False,
        },
    },
}
| mit | -5,824,480,294,159,593,000 | 25.373737 | 71 | 0.663347 | false | 3.596419 | false | false | false |
befelix/GPy | GPy/kern/src/multioutput_kern.py | 1 | 7150 | from .kern import Kern, CombinationKernel
import itertools
import numpy as np
from functools import reduce, partial
from .independent_outputs import index_to_slices
from paramz.caching import Cache_this
class ZeroKern(Kern):
def __init__(self):
super(ZeroKern, self).__init__(1, None, name='ZeroKern',useGPU=False)
def K(self, X ,X2=None):
if X2 is None:
X2 = X
return np.zeros((X.shape[0],X2.shape[0]))
def update_gradients_full(self,dL_dK, X, X2=None):
return np.zeros(dL_dK.shape)
def gradients_X(self,dL_dK, X, X2=None):
return np.zeros((X.shape[0],X.shape[1]))
class MultioutputKern(CombinationKernel):
"""
Multioutput kernel is a meta class for combining different kernels for multioutput GPs.
As an example let us have inputs x1 for output 1 with covariance k1 and x2 for output 2 with covariance k2.
In addition, we need to define the cross covariances k12(x1,x2) and k21(x2,x1). Then the kernel becomes:
k([x1,x2],[x1,x2]) = [k1(x1,x1) k12(x1, x2); k21(x2, x1), k2(x2,x2)]
For the kernel, the kernels of outputs are given as list in param "kernels" and cross covariances are
given in param "cross_covariances" as a dictionary of tuples (i,j) as keys. If no cross covariance is given,
it defaults to zero, as in k12(x1,x2)=0.
In the cross covariance dictionary, the value needs to be a struct with elements
-'kernel': a member of Kernel class that stores the hyper parameters to be updated when optimizing the GP
-'K': function defining the cross covariance
-'update_gradients_full': a function to be used for updating gradients
-'gradients_X': gives a gradient of the cross covariance with respect to the first input
"""
def __init__(self, kernels, cross_covariances={}, name='MultioutputKern'):
#kernels contains a list of kernels as input,
if not isinstance(kernels, list):
self.single_kern = True
self.kern = kernels
kernels = [kernels]
else:
self.single_kern = False
self.kern = kernels
# The combination kernel ALLWAYS puts the extra dimension last.
# Thus, the index dimension of this kernel is always the last dimension
# after slicing. This is why the index_dim is just the last column:
self.index_dim = -1
super(MultioutputKern, self).__init__(kernels=kernels, extra_dims=[self.index_dim], name=name, link_parameters=False)
nl = len(kernels)
#build covariance structure
covariance = [[None for i in range(nl)] for j in range(nl)]
linked = []
for i in range(0,nl):
unique=True
for j in range(0,nl):
if i==j or (kernels[i] is kernels[j]):
covariance[i][j] = {'kern': kernels[i], 'K': kernels[i].K, 'update_gradients_full': kernels[i].update_gradients_full, 'gradients_X': kernels[i].gradients_X}
if i>j:
unique=False
elif cross_covariances.get((i,j)) is not None: #cross covariance is given
covariance[i][j] = cross_covariances.get((i,j))
else: # zero covariance structure
kern = ZeroKern()
covariance[i][j] = {'kern': kern, 'K': kern.K, 'update_gradients_full': kern.update_gradients_full, 'gradients_X': kern.gradients_X}
if unique is True:
linked.append(i)
self.covariance = covariance
self.link_parameters(*[kernels[i] for i in linked])
@Cache_this(limit=3, ignore_args=())
def K(self, X ,X2=None):
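        # The last column of X/X2 holds the output index; the full covariance is assembled block-by-block from the per-output (cross-)kernels.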
if X2 is None:
X2 = X
slices = index_to_slices(X[:,self.index_dim])
slices2 = index_to_slices(X2[:,self.index_dim])
target = np.zeros((X.shape[0], X2.shape[0]))
[[[[ target.__setitem__((slices[i][k],slices2[j][l]), self.covariance[i][j]['K'](X[slices[i][k],:],X2[slices2[j][l],:])) for k in range( len(slices[i]))] for l in range(len(slices2[j])) ] for i in range(len(slices))] for j in range(len(slices2))]
return target
@Cache_this(limit=3, ignore_args=())
def Kdiag(self,X):
slices = index_to_slices(X[:,self.index_dim])
kerns = itertools.repeat(self.kern) if self.single_kern else self.kern
target = np.zeros(X.shape[0])
[[np.copyto(target[s], kern.Kdiag(X[s])) for s in slices_i] for kern, slices_i in zip(kerns, slices)]
return target
def _update_gradients_full_wrapper(self, cov_struct, dL_dK, X, X2):
gradient = cov_struct['kern'].gradient.copy()
cov_struct['update_gradients_full'](dL_dK, X, X2)
cov_struct['kern'].gradient += gradient
def _update_gradients_diag_wrapper(self, kern, dL_dKdiag, X):
gradient = kern.gradient.copy()
kern.update_gradients_diag(dL_dKdiag, X)
kern.gradient += gradient
def reset_gradients(self):
for kern in self.kern: kern.reset_gradients()
def update_gradients_full(self,dL_dK, X, X2=None):
self.reset_gradients()
slices = index_to_slices(X[:,self.index_dim])
if X2 is not None:
slices2 = index_to_slices(X2[:,self.index_dim])
[[[[ self._update_gradients_full_wrapper(self.covariance[i][j], dL_dK[slices[i][k],slices2[j][l]], X[slices[i][k],:], X2[slices2[j][l],:]) for k in range(len(slices[i]))] for l in range(len(slices2[j]))] for i in range(len(slices))] for j in range(len(slices2))]
else:
[[[[ self._update_gradients_full_wrapper(self.covariance[i][j], dL_dK[slices[i][k],slices[j][l]], X[slices[i][k],:], X[slices[j][l],:]) for k in range(len(slices[i]))] for l in range(len(slices[j]))] for i in range(len(slices))] for j in range(len(slices))]
def update_gradients_diag(self, dL_dKdiag, X):
self.reset_gradients()
slices = index_to_slices(X[:,self.index_dim])
[[ self._update_gradients_diag_wrapper(self.covariance[i][i]['kern'], dL_dKdiag[slices[i][k]], X[slices[i][k],:]) for k in range(len(slices[i]))] for i in range(len(slices))]
def gradients_X(self,dL_dK, X, X2=None):
slices = index_to_slices(X[:,self.index_dim])
target = np.zeros((X.shape[0], X.shape[1]) )
if X2 is not None:
slices2 = index_to_slices(X2[:,self.index_dim])
[[[[ target.__setitem__((slices[i][k]), target[slices[i][k],:] + self.covariance[i][j]['gradients_X'](dL_dK[slices[i][k],slices2[j][l]], X[slices[i][k],:], X2[slices2[j][l],:]) ) for k in range(len(slices[i]))] for l in range(len(slices2[j]))] for i in range(len(slices))] for j in range(len(slices2))]
else:
[[[[ target.__setitem__((slices[i][k]), target[slices[i][k],:] + self.covariance[i][j]['gradients_X'](dL_dK[slices[i][k],slices[j][l]], X[slices[i][k],:], (None if (i==j and k==l) else X[slices[j][l],:] )) ) for k in range(len(slices[i]))] for l in range(len(slices[j]))] for i in range(len(slices))] for j in range(len(slices))]
return target | bsd-3-clause | 6,165,214,033,855,403,000 | 53.587786 | 341 | 0.605035 | false | 3.294931 | false | false | false |
mcleary/glbinding | source/codegeneration/scripts/binding.py | 2 | 3196 |
import os, sys
import pystache
from classes.Extension import Extension
execDir = os.path.dirname(os.path.abspath(sys.argv[0])) + "/"
templateDir = "templates/"
templateExtension = "tpl"
tab = " "
tab2 = tab + tab
def versionBID(feature, core = False, ext = False):
if feature is None:
return ""
version = str(feature.major) + str(feature.minor)
if core:
return version + "core"
elif ext:
return version + "ext"
return version
def template(outputfile):
with open (execDir + templateDir + outputfile + ".in", "rU") as file:
return file.read()
def supportedLambda(obj):
return lambda feature, core, ext: ( not ext and obj.supported(feature, core)
or ext and not obj.supported(feature, False) )
def enumSuffixPriority(name):
index = name.rfind("_")
if index < 0:
return -1
ext = name[index + 1:]
if ext not in Extension.suffixes:
return -1
return Extension.suffixes.index(ext)
class Generator:
renderer = None
@classmethod
def generate(_class, context, outputPath, templateName=None):
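        # Lazily create the shared pystache renderer, expand {placeholders} in the output path from the context, and render the named template into it.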
if _class.renderer is None:
_class.renderer = pystache.Renderer(search_dirs=os.path.join(execDir, templateDir),
file_extension=templateExtension,
escape=lambda u: u)
outputDir = os.path.dirname(outputPath).format(**context)
if not os.path.exists(outputDir):
os.makedirs(outputDir)
outputFile = os.path.basename(outputPath)
if templateName is None:
templateName = outputFile
outputFile = outputFile.format(**context)
print("generating {} in {}".format(outputFile, outputDir)) #TODO-LW move logging to appropriate place
with open(os.path.join(outputDir, outputFile), 'w') as file:
file.write(_class.renderer.render_name(templateName, context))
class Status:
targetdir = ""
def status(file):
print("generating " + file.replace(Status.targetdir, ""))
# enum_binding_name_exceptions = [ "DOMAIN", "MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB", "FALSE", "TRUE", "NO_ERROR", "WAIT_FAILED" ]
def enumBID(enum):
return enum.name
# extension_binding_name_exceptions = [ ]
# ToDo: discuss - just use name for glbinding?
def extensionBID(extension):
return extension.name
def functionBID(function):
return function.name
def alphabeticallyGroupedLists():
# create a dictionary of lists by upper case letters
# and a single "everythingelse" list
keys = '0ABCDEFGHIJKLMNOPQRSTUVWXYZ'
lists = dict()
for key in keys:
lists[key] = list()
return lists
def alphabeticalGroupKeys():
return [str(c) for c in "0ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
def alphabeticalGroupKey(identifier, prefix):
# derives an key from an identifier with "GL_" prefix
index = identifier.find(prefix)
if index < 0:
return -1
index += len(prefix)
key = ((identifier[index:])[:1]).upper()
if ord(key) not in range(65, 91):
key = '0'
return key
| mit | -650,482,394,610,676,200 | 22.5 | 127 | 0.623279 | false | 3.90232 | false | false | false |
sipsorcery/bitcoin | test/functional/rpc_createmultisig.py | 6 | 10381 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
import binascii
import decimal
import itertools
import json
import os
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create, drop_origins
from test_framework.key import ECPubKey, ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
)
from test_framework.wallet_util import bytes_to_wif
class RpcCreateMultiSigTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def get_keys(self):
self.pub = []
self.priv = []
node0, node1, node2 = self.nodes
for _ in range(self.nkeys):
k = ECKey()
k.generate()
self.pub.append(k.get_pubkey().get_bytes().hex())
self.priv.append(bytes_to_wif(k.get_bytes(), k.is_compressed))
self.final = node2.getnewaddress()
def run_test(self):
node0, node1, node2 = self.nodes
self.check_addmultisigaddress_errors()
self.log.info('Generating blocks ...')
node0.generate(149)
self.sync_all()
self.moved = 0
for self.nkeys in [3, 5]:
for self.nsigs in [2, 3]:
for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
self.get_keys()
self.do_multisig()
self.checkbalances()
# Test mixed compressed and uncompressed pubkeys
self.log.info('Mixed compressed and uncompressed multisigs are not allowed')
pk0 = node0.getaddressinfo(node0.getnewaddress())['pubkey']
pk1 = node1.getaddressinfo(node1.getnewaddress())['pubkey']
pk2 = node2.getaddressinfo(node2.getnewaddress())['pubkey']
# decompress pk2
pk_obj = ECPubKey()
pk_obj.set(binascii.unhexlify(pk2))
pk_obj.compressed = False
pk2 = binascii.hexlify(pk_obj.get_bytes()).decode()
node0.createwallet(wallet_name='wmulti0', disable_private_keys=True)
wmulti0 = node0.get_wallet_rpc('wmulti0')
# Check all permutations of keys because order matters apparently
for keys in itertools.permutations([pk0, pk1, pk2]):
# Results should be the same as this legacy one
legacy_addr = node0.createmultisig(2, keys, 'legacy')['address']
assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'legacy')['address'])
# Generate addresses with the segwit types. These should all make legacy addresses
assert_equal(legacy_addr, wmulti0.createmultisig(2, keys, 'bech32')['address'])
assert_equal(legacy_addr, wmulti0.createmultisig(2, keys, 'p2sh-segwit')['address'])
assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'bech32')['address'])
assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'p2sh-segwit')['address'])
self.log.info('Testing sortedmulti descriptors with BIP 67 test vectors')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_bip67.json'), encoding='utf-8') as f:
vectors = json.load(f)
for t in vectors:
key_str = ','.join(t['keys'])
desc = descsum_create('sh(sortedmulti(2,{}))'.format(key_str))
assert_equal(self.nodes[0].deriveaddresses(desc)[0], t['address'])
sorted_key_str = ','.join(t['sorted_keys'])
sorted_key_desc = descsum_create('sh(multi(2,{}))'.format(sorted_key_str))
assert_equal(self.nodes[0].deriveaddresses(sorted_key_desc)[0], t['address'])
# Check that bech32m is currently not allowed
assert_raises_rpc_error(-5, "createmultisig cannot create bech32m multisig addresses", self.nodes[0].createmultisig, 2, self.pub, "bech32m")
def check_addmultisigaddress_errors(self):
if self.options.descriptors:
return
self.log.info('Check that addmultisigaddress fails when the private keys are missing')
addresses = [self.nodes[1].getnewaddress(address_type='legacy') for _ in range(2)]
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
for a in addresses:
# Importing all addresses should not change the result
self.nodes[0].importaddress(a)
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
# Bech32m address type is disallowed for legacy wallets
pubs = [self.nodes[1].getaddressinfo(addr)["pubkey"] for addr in addresses]
assert_raises_rpc_error(-5, "Bech32m multisig addresses cannot be created with legacy wallets", self.nodes[0].addmultisigaddress, 2, pubs, "", "bech32m")
def checkbalances(self):
node0, node1, node2 = self.nodes
node0.generate(COINBASE_MATURITY)
self.sync_all()
bal0 = node0.getbalance()
bal1 = node1.getbalance()
bal2 = node2.getbalance()
height = node0.getblockchaininfo()["blocks"]
assert 150 < height < 350
total = 149 * 50 + (height - 149 - 100) * 25
assert bal1 == 0
assert bal2 == self.moved
assert bal0 + bal1 + bal2 == total
def do_multisig(self):
node0, node1, node2 = self.nodes
if 'wmulti' not in node1.listwallets():
try:
node1.loadwallet('wmulti')
except JSONRPCException as e:
path = os.path.join(self.options.tmpdir, "node1", "regtest", "wallets", "wmulti")
if e.error['code'] == -18 and "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path) in e.error['message']:
node1.createwallet(wallet_name='wmulti', disable_private_keys=True)
else:
raise
wmulti = node1.get_wallet_rpc('wmulti')
# Construct the expected descriptor
desc = 'multi({},{})'.format(self.nsigs, ','.join(self.pub))
if self.output_type == 'legacy':
desc = 'sh({})'.format(desc)
elif self.output_type == 'p2sh-segwit':
desc = 'sh(wsh({}))'.format(desc)
elif self.output_type == 'bech32':
desc = 'wsh({})'.format(desc)
desc = descsum_create(desc)
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
assert_equal(desc, msig['descriptor'])
if self.output_type == 'bech32':
assert madd[0:4] == "bcrt" # actually a bech32 address
# compare against addmultisigaddress
msigw = wmulti.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
assert_equal(desc, drop_origins(msigw['descriptor']))
# addmultisigiaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
txid = node0.sendtoaddress(madd, 40)
tx = node0.getrawtransaction(txid, True)
vout = [v["n"] for v in tx["vout"] if madd == v["scriptPubKey"]["address"]]
assert len(vout) == 1
vout = vout[0]
scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
value = tx["vout"][vout]["value"]
prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
node0.generate(1)
outval = value - decimal.Decimal("0.00001000")
rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
prevtx_err = dict(prevtxs[0])
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "Missing redeemScript/witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# if witnessScript specified, all ok
prevtx_err["witnessScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# both specified, also ok
prevtx_err["redeemScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript mismatch to witnessScript
prevtx_err["redeemScript"] = "6a" # OP_RETURN
assert_raises_rpc_error(-8, "redeemScript does not correspond to witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript does not match scriptPubKey
del prevtx_err["witnessScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# witnessScript does not match scriptPubKey
prevtx_err["witnessScript"] = prevtx_err["redeemScript"]
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs - 1], prevtxs)
rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)
self.moved += outval
tx = node0.sendrawtransaction(rawtx3["hex"], 0)
blk = node0.generate(1)[0]
assert tx in node0.getblock(blk)["tx"]
txinfo = node0.getrawtransaction(tx, True, blk)
self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
wmulti.unloadwallet()
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
| mit | 4,962,217,062,874,460,000 | 44.331878 | 174 | 0.636451 | false | 3.551488 | true | false | false |
ppramesi/JOANNA | utils.py | 1 | 8031 | import wave, struct, time
import numpy as np
import scipy.io.wavfile as wav
import scipy.fftpack as fft
def openWavFile(fileName):
data = wav.read(fileName)
ssize = data[1].shape[0]
nparray = data[1].astype('float32')
return nparray
def stftWindowFunction(xPhi, xMag):
oldShapePhi = xPhi.shape
oldShapeMag = xMag.shape
xPhi = np.reshape(xPhi, (-1, xPhi.shape[-1]))
xMag = np.reshape(xMag, (-1, xMag.shape[-1]))
retValPhi = []
retValMag = []
for xValPhi, xValMag in zip(xPhi, xMag):
w = np.hanning(xValPhi.shape[0])
phiObj = np.zeros(xValPhi.shape[0], dtype=complex)
phiObj.real, phiObj.imag = np.cos(xValPhi), np.sin(xValPhi)
xIfft = np.fft.ifft(xValMag * phiObj)
wFft = np.fft.fft(w*xIfft.real)
retValPhi.append(np.angle(wFft))
retValMag.append(np.abs(wFft))
retValMag = np.reshape(retValMag, oldShapeMag)
retValPhi = np.reshape(retValPhi, oldShapePhi)
return retValPhi, retValMag
def stft(x, framesz, hop):
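    # Slice x into frames of length framesz spaced hop samples apart; return the per-frame FFT phase and magnitude.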
framesamp = int(framesz)
hopsamp = int(hop)
#w = np.hanning(framesamp)
X = np.asarray([np.fft.fft(x[i:i+framesamp]) for i in range(0, len(x) - framesamp, hopsamp)])
xPhi = np.angle(X)
xMag = np.abs(X)
return xPhi, xMag
def istft(X, fs, hop, origs):
x = np.zeros(origs)
framesamp = X.shape[1]
hopsamp = int(hop*fs)
for n,i in enumerate(range(0, len(x)-framesamp, hopsamp)):
x[i:i+framesamp] += np.real(np.fft.ifft(X[n]))
return x
def waveToSTFT(waveData, sampCount, blkSize, hop):
initLen = len(waveData)
sampSize = int(initLen/sampCount)
phiObj = []
magObj = []
for sInd in xrange(0, sampCount):
tempTmSpls = []
sampBlk = waveData[sInd * sampSize:(sInd + 1) * sampSize]
stftPhi, stftMag = stft(sampBlk, blkSize, hop)
phiObj.append(stftPhi)
magObj.append(stftMag)
return ([], np.asarray(phiObj), np.asarray(magObj))
def waveToMFCC(waveData, sampCount, blkCount=False, blkSize=False):
waveLen = len(waveData)
sampSize = int(waveLen/sampCount)
retTmSpl = []
if blkSize:
blkCount = sampSize/blkSize
elif blkCount:
blkSize = sampSize/blkCount
else:
return False
for sInd in xrange(0, sampCount):
tempTmSpls = []
sampBlk = waveData[sInd * sampSize:(sInd + 1) * sampSize]
for bInd in xrange(0, blkCount):
tempBlk = sampBlk[bInd * blkSize:(bInd + 1) * blkSize]
complexSpectrum = np.fft.fft(tempBlk)
powerSpectrum = np.abs(complexSpectrum) ** 2
filteredSpectrum = powerSpectrum
logSpectrum = np.log(filteredSpectrum)
dctSpectrum = fft.dct(logSpectrum, type=2)
tempTmSpls.append(dctSpectrum)
retTmSpl.append(tempTmSpls)
retTmSpl = np.asarray(retTmSpl)
return retTmSpl
def waveToBlock(waveData, sampCount, blkCount=False, blkSize=False, olapf=1, shift=False):
if shift:
waveData = np.concatenate((waveData[shift:], waveData[:shift]))
waveLen = len(waveData)
sampSize = int(waveLen/sampCount)
retPhase = []
retMag = []
retTmSpl = []
if blkSize and blkCount:
tlen = sampCount * blkCount * blkSize
sampSize = blkCount * blkSize
diff = tlen - waveLen
if diff > 0:
waveData = np.pad(waveData, (0,diff), 'constant', constant_values=0)
elif blkSize:
blkCount = sampSize/blkSize
elif blkCount:
blkSize = sampSize/blkCount
else:
return False
for sInd in xrange(0, sampCount):
tempPhases = []
tempMags = []
tempTmSpls = []
sampBlk = waveData[sInd * sampSize:(sInd + 1) * sampSize]
for bInd in xrange(0, blkCount - (olapf - 1)):
tempBlk = sampBlk[bInd * blkSize:(bInd + olapf) * blkSize]
tempFFT = np.fft.fft(tempBlk)
tempPhase = np.angle(tempFFT)
tempMagn = np.abs(tempFFT)
tempPhases.append(tempPhase)
tempMags.append(tempMagn)
tempTmSpls.append(tempBlk)
retPhase.append(tempPhases)
retMag.append(tempMags)
retTmSpl.append(tempTmSpls)
retPhase = np.asarray(retPhase)
retTmSpl = np.asarray(retTmSpl)
retMag = np.asarray(retMag)
return (retTmSpl, retPhase, retMag)
def sectionFeatureScaling(data):
dataShape = data.shape
flatData = np.copy(data).flatten()
flatMax = np.max(flatData)
flatMin = np.min(flatData)
scaledData = (flatData - flatMin)/(flatMax- flatMin)
scaledData = np.reshape(scaledData, dataShape)
return scaledData, flatMax, flatMin
def blockFeatureScaling(kData):
data = np.copy(kData)
maxVal = np.max(np.max(data, axis=0), axis=0)
minVal = np.min(np.min(data, axis=0), axis=0)
scaledData = (data - minVal)/(maxVal- minVal)
return scaledData, maxVal, minVal
def sectionNormalize(data):
dataShape = data.shape
flatData = np.copy(data).flatten()
flatMean = np.mean(flatData)
flatStd = np.std(flatData)
scaledData = (flatData - flatMean)/flatStd
scaledData = np.reshape(scaledData, dataShape)
return scaledData, flatMean, flatStd
def blockNormalize(data):
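    # Normalise each feature to zero mean / unit variance across sections and blocks; the mean and std are returned so the scaling can be inverted.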
dataStartShape = data.shape
if len(data.shape) == 2:
data = np.reshape(data, (1, data.shape[0], data.shape[1]))
if len(data.shape) == 1:
data = np.reshape(data, (1, 1, data.shape[0]))
npNorm = np.zeros_like(data)
xCount = data.shape[0]
yCount = data.shape[1]
for sectInd in xrange(xCount):
for blockInd in xrange(yCount):
npNorm[sectInd][blockInd] = data[sectInd][blockInd]
mean = np.mean(np.mean(npNorm, axis=0), axis=0)
std = np.sqrt(np.mean(np.mean(np.abs(npNorm-mean)**2, axis=0), axis=0))
std = np.maximum(1.0e-8, std)
norm = npNorm.copy()
norm[:] -= mean
norm[:] /= std
return norm, mean, std
def extractSTFTWaveData(wavData, sampCount, blkSize=False, returnObj="all", olapf=100):#(waveData, sampCount, blkCount, blkSize, hop):
#wavData = openWavFile(fileName)
#wavObj, wavPhi, wavMag = waveToBlock(wavData, sampCount, blkCount=blkCount, blkSize=blkSize, olapf=olapf, shift=False)
wavObj, wavPhi, wavMag = waveToSTFT(wavData, sampCount, blkSize=blkSize, hop=olapf)
#mfccObj = waveToMFCC(wavData, sampCount, blkCount, blkSize)
phiWav, meanPhiWav, stdPhiWav = blockNormalize(wavPhi)
magWav, meanMagWav, stdMagWav = blockNormalize(wavMag)
#MfccWav, meanMfcc, stdMfcc = blockNormalize(mfccObj)
if returnObj == "phase":
return phiWav, meanPhiWav, stdPhiWav
elif returnObj == "magnitude":
return magWav, meanMagWav, stdMagWav
else:
        return phiWav, meanPhiWav, stdPhiWav, magWav, meanMagWav, stdMagWav
def extractWaveData(wavData, sampCount, blkCount=False, blkSize=False, returnObj="all", olapf=1, shift=False):
#wavData = openWavFile(fileName)
wavObj, wavPhi, wavMag = waveToBlock(wavData, sampCount, blkCount=blkCount, blkSize=blkSize, olapf=olapf, shift=False)
#mfccObj = waveToMFCC(wavData, sampCount, blkCount, blkSize)
phiWav, meanPhiWav, stdPhiWav = blockNormalize(wavPhi)
magWav, meanMagWav, stdMagWav = blockNormalize(wavMag)
#MfccWav, meanMfcc, stdMfcc = blockNormalize(mfccObj)
if returnObj == "phase":
return phiWav, meanPhiWav, stdPhiWav
elif returnObj == "magnitude":
return magWav, meanMagWav, stdMagWav
else:
        return phiWav, meanPhiWav, stdPhiWav, magWav, meanMagWav, stdMagWav
def blockShift(data, shift=1):
retObj = []
for sectInd in xrange(data.shape[0]):
retObj.append( np.concatenate((data[sectInd][shift:], data[sectInd][0:shift])) )
return np.reshape(retObj, data.shape) | mit | 5,391,299,308,794,588,000 | 36.252381 | 134 | 0.628191 | false | 2.986612 | false | false | false |
niosus/EasyClangComplete | tests/test_subl_bridge.py | 1 | 3140 | """Test tools.
Attributes:
easy_clang_complete (module): this plugin module
SublBridge (SublBridge): class for subl bridge
"""
import imp
from os import path
from EasyClangComplete.plugin.settings import settings_manager
from EasyClangComplete.plugin.utils.subl import subl_bridge
from EasyClangComplete.tests.gui_test_wrapper import GuiTestWrapper
imp.reload(settings_manager)
imp.reload(subl_bridge)
SettingsManager = settings_manager.SettingsManager
SublBridge = subl_bridge.SublBridge
PosStatus = subl_bridge.PosStatus
class test_tools_command(GuiTestWrapper):
"""Test sublime commands."""
def set_text(self, string):
"""Set text to a view.
Args:
string (str): some string to set
"""
self.view.run_command("insert", {"characters": string})
def move(self, dist, forward=True):
"""Move the cursor by distance.
Args:
            dist (int): number of characters to move
forward (bool, optional): forward or backward in the file
"""
for _ in range(dist):
self.view.run_command("move",
{"by": "characters", "forward": forward})
def test_next_line(self):
"""Test returning next line."""
self.set_up_view()
self.set_text("hello\nworld!")
self.move(10, forward=False)
next_line = SublBridge.next_line(self.view)
self.assertEqual(next_line, "world!")
def test_wrong_triggers(self):
"""Test that we don't complete on numbers and wrong triggers."""
self.set_up_view(path.join(path.dirname(__file__),
'test_files',
'test_wrong_triggers.cpp'))
# Load the completions.
manager = SettingsManager()
settings = manager.user_settings()
# Check the current cursor position is completable.
self.assertEqual(self.get_row(2), " a > 2.")
# check that '>' does not trigger completions
pos = self.view.text_point(2, 5)
current_word = self.view.substr(self.view.word(pos))
self.assertEqual(current_word, "> ")
status = SublBridge.get_pos_status(pos, self.view, settings)
# Verify that we got the expected completions back.
self.assertEqual(status, PosStatus.WRONG_TRIGGER)
# check that 'a' does not trigger completions
pos = self.view.text_point(2, 3)
current_word = self.view.substr(self.view.word(pos))
self.assertEqual(current_word, "a")
status = SublBridge.get_pos_status(pos, self.view, settings)
# Verify that we got the expected completions back.
self.assertEqual(status, PosStatus.COMPLETION_NOT_NEEDED)
# check that '2.' does not trigger completions
pos = self.view.text_point(2, 8)
current_word = self.view.substr(self.view.word(pos))
self.assertEqual(current_word, ".\n")
status = SublBridge.get_pos_status(pos, self.view, settings)
# Verify that we got the expected completions back.
self.assertEqual(status, PosStatus.WRONG_TRIGGER)
| mit | -4,480,479,087,521,750,500 | 32.052632 | 75 | 0.62707 | false | 3.929912 | true | false | false |
varnivey/hakoton_images | gui/table_widget.py | 1 | 7038 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Grigoriy A. Armeev, 2015
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v2 for more details.
# Cheers, Satary.
#
from PyQt4 import QtGui,QtCore
import sys, csv
class TableWidget(QtGui.QTableWidget):
def __init__(self,parent=None):
super(TableWidget, self).__init__(parent)
# if you want to use parent's methods and be ugly as I do, use parent =)
self.parent=parent
self.clip = QtGui.QApplication.clipboard()
self.horizontalHeader().setMovable(True)
self.verticalHeader().setMovable(True)
self.horizontalHeader().setDefaultSectionSize(60)
self.setMinimumWidth(250)
self.setMinimumHeight(250)
self.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.rowOrder=[]
self.columnOrder=[]
self.verticalHeader().sectionMoved.connect( self.getOrders)
self.horizontalHeader().sectionMoved.connect( self.getOrders)
def buildFromDict(self,inDict,rowOrder=[],columnOrder=[]):
self.setRowCount(0)
self.setColumnCount(0)
# finding all rows and cols in dict
newRow = []
newCol = []
for row in inDict:
if not(row in newRow):
newRow.append(row)
for col in inDict[row]:
if not(col in newCol):
newCol.append(col)
# adding new rows and cols in dict
sortNewRow=[]
sortNewCol=[]
for row in inDict:
if not(row in rowOrder):
sortNewRow.append(row)
for col in inDict[row]:
if not(col in columnOrder):
sortNewCol.append(col)
sortNewRow.sort()
sortNewCol.sort()
[rowOrder.append(row) for row in sortNewRow]
[columnOrder.append(col) for col in sortNewCol]
# creating ordered list of not empty values
visibleRows = []
visibleCols = []
for row in rowOrder:
if row in newRow:
visibleRows.append(row)
for col in columnOrder:
if col in newCol:
visibleCols.append(col)
#drawin table and asigning row and column names
rows=[]
columns=[]
for row in visibleRows:
#if row in inDict:
rows.append(row)
self.insertRow(self.rowCount())
self.setVerticalHeaderItem(self.rowCount()-1, QtGui.QTableWidgetItem(row))
for col in visibleCols:
#if (col in inDict[row]):
if (not(col in columns)):
columns.append(col)
self.insertColumn(self.columnCount())
self.setHorizontalHeaderItem(self.columnCount()-1,QtGui.QTableWidgetItem(col))
#asidning values
for row in rows:
for col in columns:
try:
item=QtGui.QTableWidgetItem(str(inDict[row][col]))
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.setItem(rows.index(row),columns.index(col),item)
except:
pass
self.verticalHeader().setDefaultSectionSize(self.verticalHeader().minimumSectionSize())
self.rowOrder = rowOrder #rows
self.columnOrder = columnOrder #columns
def getOrders(self,event=None):
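        # Capture the current visual ordering of row and column headers, keeping any previously recorded names that are no longer displayed.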
#try:
rowNames = [str(self.verticalHeaderItem(i).text()) for i in range(self.rowCount())]
rowIndx = [self.visualRow(i) for i in range(self.rowCount())]
rowOrder = [x for (y,x) in sorted(zip(rowIndx,rowNames))]
for row in self.rowOrder:
if not(row in rowOrder):
rowOrder.append(row)
self.rowOrder = rowOrder
colNames = [str(self.horizontalHeaderItem(i).text()) for i in range(self.columnCount())]
colIndx = [self.visualColumn(i) for i in range(self.columnCount())]
columnOrder = [x for (y,x) in sorted(zip(colIndx,colNames))]
for col in self.columnOrder:
if not(col in columnOrder):
columnOrder.append(col)
self.columnOrder = columnOrder
def keyPressEvent(self, e):
if (e.modifiers() & QtCore.Qt.ControlModifier):
if e.key() == QtCore.Qt.Key_C:
self.copySelectionToClipboard()
def contextMenuEvent(self, pos):
menu = QtGui.QMenu()
copyAction = menu.addAction("Copy")
action = menu.exec_(QtGui.QCursor.pos())
if action == copyAction:
self.copySelectionToClipboard()
def handleSave(self,path):
rowLog = range(self.rowCount())
rowIndx = [self.visualRow(i) for i in rowLog]
rowVis = [x for (y,x) in sorted(zip(rowIndx,rowLog))]
colLog = range(self.columnCount())
colIndx = [self.visualColumn(i) for i in colLog]
colVis = [x for (y,x) in sorted(zip(colIndx,colLog))]
with open(unicode(path), 'wb') as stream:
writer = csv.writer(stream)
rowdata = []
rowdata.append("")
for column in colVis:
rowdata.append(unicode(self.horizontalHeaderItem(column).text()).encode('utf8'))
writer.writerow(rowdata)
for row in rowVis:
rowdata = []
rowdata.append(unicode(self.verticalHeaderItem(row).text()).encode('utf8'))
for column in colVis:
item = self.item(row, column)
if item is not None:
rowdata.append(
unicode(item.text()).encode('utf8'))
else:
rowdata.append('')
writer.writerow(rowdata)
def copySelectionToClipboard(self):
selected = self.selectedRanges()
s = ""
for r in xrange(selected[0].topRow(),selected[0].bottomRow()+1):
for c in xrange(selected[0].leftColumn(),selected[0].rightColumn()+1):
try:
s += str(self.item(r,c).text()) + "\t"
except AttributeError:
s += "\t"
s = s[:-1] + "\n" #eliminate last '\t'
self.clip.setText(s)
def main():
app = QtGui.QApplication(sys.argv)
ex = TableWidget()
ex.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| gpl-2.0 | -8,784,571,887,633,566,000 | 36.232804 | 98 | 0.557198 | false | 4.103207 | false | false | false |
ewels/MultiQC | multiqc/utils/log.py | 1 | 3660 | #!/usr/bin/env python
"""
Code to initilise the MultiQC logging
"""
import coloredlogs
import logging
import os
import shutil
import sys
import tempfile
from multiqc.utils import config, util_functions, mqc_colour
LEVELS = {0: "INFO", 1: "DEBUG"}
log_tmp_dir = None
log_tmp_fn = "/dev/null"
def init_log(logger, loglevel=0, no_ansi=False):
"""
Initializes logging.
Prints logs to console with level defined by loglevel
Also prints verbose log to the multiqc data directory if available.
(multiqc_data/multiqc.log)
Args:
loglevel (str): Determines the level of the log output.
"""
# File for logging
global log_tmp_dir, log_tmp_fn
log_tmp_dir = tempfile.mkdtemp()
log_tmp_fn = os.path.join(log_tmp_dir, "multiqc.log")
# Logging templates
debug_template = "[%(asctime)s] %(name)-50s [%(levelname)-7s] %(message)s"
info_template = "|%(module)18s | %(message)s"
# Base level setup
logger.setLevel(getattr(logging, "DEBUG"))
# Automatically set no_ansi if not a tty terminal
if not no_ansi:
if not sys.stderr.isatty() and not force_term_colors():
no_ansi = True
# Set up the console logging stream
console = logging.StreamHandler()
console.setLevel(getattr(logging, loglevel))
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES
level_styles["debug"] = {"faint": True}
field_styles = coloredlogs.DEFAULT_FIELD_STYLES
field_styles["module"] = {"color": "blue"}
if loglevel == "DEBUG":
if no_ansi:
console.setFormatter(logging.Formatter(debug_template))
else:
console.setFormatter(
coloredlogs.ColoredFormatter(fmt=debug_template, level_styles=level_styles, field_styles=field_styles)
)
else:
if no_ansi:
console.setFormatter(logging.Formatter(info_template))
else:
console.setFormatter(
coloredlogs.ColoredFormatter(fmt=info_template, level_styles=level_styles, field_styles=field_styles)
)
logger.addHandler(console)
# Now set up the file logging stream if we have a data directory
file_handler = logging.FileHandler(log_tmp_fn, encoding="utf-8")
file_handler.setLevel(getattr(logging, "DEBUG")) # always DEBUG for the file
file_handler.setFormatter(logging.Formatter(debug_template))
logger.addHandler(file_handler)
def move_tmp_log(logger):
"""Move the temporary log file to the MultiQC data directory
if it exists."""
try:
# https://stackoverflow.com/questions/15435652/python-does-not-release-filehandles-to-logfile
logging.shutdown()
shutil.copy(log_tmp_fn, os.path.join(config.data_dir, "multiqc.log"))
os.remove(log_tmp_fn)
util_functions.robust_rmtree(log_tmp_dir)
except (AttributeError, TypeError, IOError):
pass
def get_log_stream(logger):
"""
Returns a stream to the root log file.
If there is no logfile return the stderr log stream
Returns:
A stream to the root log file or stderr stream.
"""
file_stream = None
log_stream = None
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
file_stream = handler.stream
else:
log_stream = handler.stream
if file_stream:
return file_stream
return log_stream
def force_term_colors():
"""
Check if any environment variables are set to force Rich to use coloured output
"""
if os.getenv("GITHUB_ACTIONS") or os.getenv("FORCE_COLOR") or os.getenv("PY_COLORS"):
return True
return None
| gpl-3.0 | 9,199,252,740,111,501,000 | 29.247934 | 118 | 0.657377 | false | 3.808533 | false | false | false |
xiaofeiyangyang/physpetools | physpetool/phylotree/retrieveprotein.py | 1 | 9001 | # ########################## Copyrights and License #############################
# #
# Copyright 2016 Yang Fang <[email protected]> #
# #
# This file is part of PhySpeTree. #
# https://xiaofeiyangyang.github.io/physpetools/ #
# #
# PhySpeTree is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PhySpeTree is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PhySpeTree. If not, see <http://www.gnu.org/licenses/>. #
# #
# ###############################################################################
"""
The module retrieve highly conserved proteins and download from KEGG database
"""
import shutil
import glob
import ftplib
import os
import sqlite3
import time
from physpetool.database.dbpath import getlocaldbpath
from physpetool.phylotree.log import getLogging
from physpetool.tools.keggapi import getprotein
logretrieveprotein = getLogging('KEGG INDEX DB')
KEGGDB = "KEGG_DB_3.0.db"
def getspecies(spelist, colname):
"""
Get species protein index for DB
:param name: a list contain abbreviation species nam
:param colname: a list contain colname of DB
:return: a list contain protein index can be retrieved and a match ko list (is a ko id list)
"""
dbpath = getlocaldbpath()
db = os.path.join(dbpath, KEGGDB)
relist = []
match_ko_name = []
conn = sqlite3.connect(db)
conn.text_factory = str
c = conn.cursor()
if len(spelist) >= 1000:
sp = splist(spelist,500)
else:
sp = [spelist]
for ko in colname:
tem_reslist = []
tem_none = 0
for line in sp:
connect = "' OR NAME = '".join(line)
query = "SELECT " + ko + " FROM proindex WHERE NAME = '" + connect + "'"
c.execute(query)
ids = list(c.fetchall())
idslist = [str(x[0]) for x in ids]
num_none = len([x for x in idslist if x == 'None'])
tem_none += num_none
tem_reslist.extend(idslist)
if tem_none != len(tem_reslist):
relist.append(tem_reslist)
match_ko_name.append(ko)
c.close()
return relist, match_ko_name
def getcolname():
"""get BD colnames"""
dbpath = getlocaldbpath()
db = os.path.join(dbpath, KEGGDB)
conn = sqlite3.connect(db)
conn.text_factory = str
c = conn.cursor()
c.execute("SELECT * FROM proindex")
col_name_list = [tuple[0] for tuple in c.description]
c.close()
return col_name_list[2:]
def splist(l, s):
"""split a list to sub list contain s"""
return [l[i:i + s] for i in range(len(l)) if i % s == 0]
def retrieveprotein(proindexlist, outpath, matchlist, spelist, local_db):
"""
Retrieve proteins form Kegg DB
:param proindexlist: a list contain protein index
:param outpath: user input outpath
:return: retrieve protein path
"""
timeformat = '%Y%m%d%H%M%S'
timeinfo = str(time.strftime(timeformat))
subdir = 'temp/conserved_protein' + timeinfo
dirname = os.path.dirname(outpath)
dirname = os.path.join(dirname, subdir)
if not os.path.exists(dirname):
os.makedirs(dirname)
fasta = {}
p = 1
# get hcp proteins form ftp server
if local_db == "":
connect = ftplib.FTP("173.255.208.244")
connect.login('anonymous')
connect.cwd('/pub/databasehcp')
for line in spelist:
w_file = dirname + "/" + line + ".fa"
fw_ = open(w_file, 'ab')
retrievename = line + '.fasta'
remoteFileName = 'RETR ' + os.path.basename(retrievename)
connect.retrbinary(remoteFileName, fw_.write)
fw_.write(b'\n')
fw_.close()
logretrieveprotein.info("Retrieve " + line + " highly conserved proteins completed")
# read get sequences
with open(w_file, 'r') as f:
for line in f:
if line != "\n":
tem = line.strip()
if tem[0] == '>':
header = tem[1:]
else:
sequence = tem
fasta[header] = fasta.get(header, '') + sequence
connect.quit()
# get protein sequence from local
else:
for line in spelist:
file_name = line +".fasta"
file_name_new = line + ".fa"
abb_data_path = os.path.join(local_db,file_name)
abb_data_path_new = os.path.join(dirname,file_name_new)
shutil.copyfile(abb_data_path,abb_data_path_new)
logretrieveprotein.info("Retrieve " + line + " highly conserved proteins completed")
# read get sequences
with open(abb_data_path_new, 'r') as f:
for line in f:
if line != "\n":
tem = line.strip()
if tem[0] == '>':
header = tem[1:]
else:
sequence = tem
fasta[header] = fasta.get(header, '') + sequence
for index in proindexlist:
have_none = False
none_num = len([x for x in index if x == 'None'])
app_spe = []
if none_num != len(index):
q_index = [var for var in index if var != 'None']
have_spe = [x.split(":")[0] for x in q_index]
app_spe = [x for x in spelist if x not in have_spe]
have_none = True
else:
q_index = index
hcp_pro_name = hcp_name(matchlist[p - 1])
wfiles = "{0}/p{1}.fasta".format(dirname, p)
fw = open(wfiles, 'a')
for id in q_index:
abb_name = id.strip().split(":")[0]
if id in fasta.keys():
fw.write(">"+abb_name+"\n"+fasta[id]+"\n")
else:
name_none = ">" + abb_name + "\n"
fw.write(name_none + "M" + "\n")
if have_none:
for line in app_spe:
name_none = ">" + line + "\n"
fw.write(name_none + "M" + "\n")
fw.close()
logretrieveprotein.info(
"Retrieve and download of highly conserved protein '{0}' was successful store in p{1}.fasta file".format(
hcp_pro_name, str(p)))
p += 1
logretrieveprotein.info("Retrieve from KEGG database " + str(p - 1) + " highly conserved proteins")
for infile in glob.glob(os.path.join(dirname, '*.fa')):
os.remove(infile)
return dirname
def doretrieve(specieslistfile, outpath,local_db):
'''main to retrieve protein from kegg db'''
# spelist = []
# for line in specieslistfile:
# st = line.strip()
# spelist.append(st)
spelist = specieslistfile
logretrieveprotein.info("Reading organisms's names success!")
colname = getcolname()
proindexlist, matchlist = getspecies(spelist, colname)
dirpath = retrieveprotein(proindexlist, outpath, matchlist, spelist,local_db)
return dirpath
def hcp_name(index):
"""get highly conserved protein names from ko list"""
ko_path = getlocaldbpath()
pro_ko = os.path.join(ko_path, "protein_ko.txt")
with open(pro_ko) as ko:
for line in ko:
name = line.strip().split(',')
if name[1] == index:
return name[0]
if __name__ == '__main__':
print(getcolname())
print(getspecies(['swe'], ['K01409']))
# for line in getcolname():
# if getspecies(['mdm'], [line])[0] != []:
# proid = getspecies(['mdm'], [line])[0][0][0]
# print("http://rest.kegg.jp/get/" + proid + "/aaseq")
specieslistfile = ['zma', "ath", "eco"]
outpath = "/home/yangfang/test/alg2/"
doretrieve(specieslistfile, outpath,local_db="")
| gpl-3.0 | -2,210,047,906,401,690,600 | 36.041152 | 117 | 0.510721 | false | 3.873064 | false | false | false |
ExeClim/Isca | src/extra/python/scripts/find_namelists_to_check.py | 1 | 3480 | import subprocess
import os
import glob
from pathlib import Path
import pdb
#A script to find the fortran files within Isca's src directory
#that include namelists, and to check if namelist checking is done.
#find the location of the source code
GFDL_BASE = os.environ['GFDL_BASE']
#setup some output dictionaries and lists
fortran_file_dict = {}
includes_namelist_dict = {}
includes_check_namelist_dict = {}
n_check_namelist_dict = {}
if_def_internal_nml_dict={}
files_with_namelists = []
namelists_to_flag = []
namelists_to_flag_possible = []
#find ALL of the fortran files within GFDL_BASE/src directory
for path in Path(f'{GFDL_BASE}/src/').rglob('*.*90'):
#exclude files with ._ at the start
if path.name[0:2]!='._':
#add all the remaining files to a dictionary
fortran_file_dict[path.name] = path
#go through each file and check if it contains a namelist, and if it does namelist checking
for file_name in fortran_file_dict.keys():
file_path = fortran_file_dict[file_name]
#initialise some of the checking variables
namelist_in_file=False
check_namelist_in_file=False
number_of_checks=0
if_def_internal_nml_in_file=False
#open each of the fortran files
with open(file_path, 'r') as read_obj:
for line in read_obj:
#check if it contains a namelist
if 'namelist /' in line and not namelist_in_file:
namelist_in_file=True
# does it contain the check_nml_error command?
if 'check_nml_error' in line and not check_namelist_in_file:
check_namelist_in_file=True
# count how many times this string is mentioned
if 'check_nml_error' in line:
number_of_checks=number_of_checks+1
#check if there's more than one type of namelist reading available
if '#ifdef INTERNAL_FILE_NML' in line and not if_def_internal_nml_in_file:
if_def_internal_nml_in_file=True
#make a list of those files that do have a namelist
if namelist_in_file:
files_with_namelists.append(file_name)
#make a list of those files that do have a namelist but don't do checking
if namelist_in_file and not check_namelist_in_file:
namelists_to_flag.append(file_name)
#making a list of files that have namelists, that read them in more than one way, and have fewer than 3 mentions of check_nml_error. This is to catch cases where there is some namelist checking taking place, but it's not on all the methods of namelist reading.
if namelist_in_file and if_def_internal_nml_in_file and number_of_checks<3:
namelists_to_flag_possible.append(file_name)
#keep a record of the files that include a namelist
includes_namelist_dict[file_name]=namelist_in_file
#keep a record of the files that do and don't do namelist checking
includes_check_namelist_dict[file_name]=check_namelist_in_file
#keep a record of the number of checks taking place
n_check_namelist_dict[file_name] = number_of_checks
#create a list of files that appear in namelists_to_flag_possible
list_of_filepaths_to_check = [str(fortran_file_dict[path]) for path in namelists_to_flag_possible]
#print the number of checks
print([n_check_namelist_dict[path] for path in namelists_to_flag_possible])
#print the list of files
print(namelists_to_flag_possible)
#print their directories
print(list_of_filepaths_to_check) | gpl-3.0 | -6,630,615,997,728,799,000 | 38.556818 | 264 | 0.694828 | false | 3.561924 | false | false | false |
jlettvin/shmathd | gpgpu/gpu11.py | 1 | 26973 | #!/usr/bin/env python
###############################################################################
# TODO JMP JE JG JL JGE JLE SETJMP LONGJMP DATA LABEL
# TODO discover how to keep code resident and send it new data
# TODO discover how to reference other pixel data for convolution/correlation
# TODO Use Tower of Hanoi separate data stacks for each type and
# make different instructions (or modifiers) for each.
# TODO test whether the BLOCKSIZE approach interferes with referencing
# Perhaps convolve a checkerboard with a Gaussian blur.
###############################################################################
"""gpu11.py implements an RPN kernel constructor.
"""
import re
from sys import (argv, path)
from PIL import (Image)
from time import (time)
from numpy import (array, float32, int32, empty_like, uint8)
path.append('../Banner')
# from pprint import pprint
from Banner import (Banner)
# from operator import (add, sub, mul, div)
# pycuda imports do not pass pylint tests.
# pycuda.autoinit is needed for cuda.memalloc.
import pycuda.autoinit # noqa
from pycuda.driver import (mem_alloc, memcpy_htod, memcpy_dtoh) # noqa
from pycuda.compiler import (SourceModule) # noqa
###############################################################################
class CUDAMathConstants(object):
"""Initialize math constants for the interpreter."""
###########################################################################
def __init__(self, **kw):
"""Initialize math constants class."""
filename = kw.get(
'filename',
'/usr/local/cuda-5.5/targets/x86_64-linux/include/'
'math_constants.h')
self.caselist = []
self.identified = {}
with open('RPN_CUDA_constants.txt', 'w') as manual:
print>>manual, '# RPN CUDA constants'
self.hrule(manual)
print>>manual, '# PUSH CUDA constant onto RPN stack'
self.hrule(manual)
with open(filename) as source:
for line in source:
if line.startswith('#define'):
token = re.findall(r'(\S+)', line)
if len(token) != 3:
continue
define, name, value = token
if '.' not in value:
continue
# if name.endswith('_HI') or name.endswith('_LO'):
# continue
self.identified[name] = value
print>>manual, '%24s: %s' % (name, value)
self.hrule(manual)
###########################################################################
def hrule(self, stream):
"""Debugging: output horizontal rule."""
print>>stream, '#' + '_' * 78
###########################################################################
def functions(self):
"""Prepare function handling."""
end = '/*************************************************************/'
text = ''
for token in self.identified.iteritems():
name, value = token
text += ''.join((
'__device__ int %s\n' % (end),
'RPN_%s_RPN(Thep the) {' % (name),
' IPUP = %s;' % (name),
' return 0;',
'}\n',
))
return text
###########################################################################
def cases(self):
"""Prepare case handling."""
# case = []
# count = 0
for token in self.identified.iteritems():
name, value = token
# case += ['error = RPN_%s_RPN(&the)' % (name), ]
self.caselist += ['{ *dstack++ = %s; }' % (name), ]
return self.caselist
###############################################################################
class CUDAMathFunctions(object):
"""CUDAMathFunctions class"""
found = set()
###########################################################################
def __init__(self, **kw):
"""CUDAMathFunctions __init__"""
clip = kw.get(
'clip',
True)
filename = kw.get(
'filename',
'/usr/local/cuda-5.5/targets/x86_64-linux/include/'
'math_functions.h')
signature = kw.get(
'signature',
'extern __host__ __device__ __device_builtin__ float')
self.caselist = []
with open('RPN_CUDA_functions.txt', 'w') as manual:
print>>manual, '# RPN CUDA functions'
self.hrule(manual)
signatureAB = '(float x, float y)'
signatureA_ = '(float x)'
self.one = {}
self.two = {}
with open(filename) as source:
for line in source:
if line.startswith(signature):
A, B, C = line.partition('float')
if not C:
continue
function = C.strip()
if function.endswith(') __THROW;'):
function = function[:-9]
name, paren, args = function.partition('(')
if name in CUDAMathFunctions.found:
continue
else:
CUDAMathFunctions.found.add(name)
if signatureAB in function:
# print 'AB', function
if clip:
name = name[:-1] # remove f
self.two[name] = name
self.caselist += ['{ ab %s(a, b); }' % (name), ]
elif signatureA_ in function:
# print 'A_', function
self.one[name] = name
self.caselist += ['{ a_ %s(a); }' % (name), ]
else:
continue
print>>manual, '# functions of one float parameter'
print>>manual, '# pop A and push fun(A).'
self.hrule(manual)
for cuda, inner in self.one.iteritems():
print>>manual, 'float %s(float) // %s' % (inner, name)
self.hrule(manual)
print>>manual, '# functions of two float parameters'
print>>manual, '# pop A, pop B and push fun(A, B)'
self.hrule(manual)
for cuda, inner in self.two.iteritems():
print>>manual, 'float %s(float, float) // %s' % (inner, name)
self.hrule(manual)
###########################################################################
def hrule(self, stream):
"""CUDAMathFunctions hrule"""
print>>stream, '#' + '_' * 78
###########################################################################
def functions(self):
"""CUDAMathFunctions functions"""
return ''
###########################################################################
def cases(self):
"""CUDAMathFunctions cases"""
return self.caselist
###############################################################################
class Timing(object):
"""Timing class"""
text = ''
###########################################################################
def __init__(self, msg=''):
"""Timing __init__"""
self.msg = msg
###########################################################################
def __enter__(self):
"""Timing __enter__"""
self.t0 = time()
###########################################################################
def __exit__(self, typ, value, tb):
"""Timing __exit__"""
Timing.text += '%40s: %e\n' % (self.msg, (time() - self.t0))
###############################################################################
class Function(object):
"""Function class"""
###########################################################################
def __init__(self, **kw):
"""Function __init__"""
self.index = kw.get('start', 0)
self.name = {}
self.body = ""
self.case = ""
self.tab = " " * 12
self.final = [0]
self.code = {'#%d' % d: d for d in range(kw.get('bss', 64))}
self.bss = self.code.keys()
for i, name in enumerate(
kw.get('handcode', [
'swap', 'add', 'mul', 'ret', 'sub', 'div',
'call', 'noop', 'invert', 'push', 'pop', 'jmp', ])):
self.add_name(name, i)
###########################################################################
def add_name(self, name, index):
"""Function add_name"""
self.code[name] = index
self.name[index] = name
###########################################################################
def assemble(self, source, DATA, **kw):
"""Function assemble"""
self.label = {'code': [], 'data': [], }
self.data = []
fixups = {}
self.clabels = {}
self.backclabels = {}
self.dlabels = {}
self.backdlabels = {}
self.final = []
extra = 0
for offset, name in enumerate(DATA):
name = str(name)
label, colon, datum = name.partition(':')
if colon:
self.dlabels[label] = offset + extra
self.backdlabels[offset + extra] = label
self.label['data'] += [label, ]
# print '\t\t\tdata', label, offset + extra
else:
datum = label
values = datum.split()
self.data += values
extra += len(values) - 1
# print 'A0', self.backclabels
# print 'B0', self.clabels
for offset, name in enumerate(source):
name = re.sub(' \t', '', name)
label, colon, opname = name.partition(':')
if not colon:
label, opname = None, label
# print 'name = %s', (opname)
else:
assert label not in self.clabels.keys()
self.clabels[label] = offset
self.backclabels[offset] = label
self.label['code'] += [label, ]
# print '\t\t\tcode', label
if opname in self.code.keys():
self.final += [self.code[opname], ]
# print 'instruction'
else:
self.final += [stop, ]
fixups[opname] = fixups.get(opname, []) + [offset, ]
# print 'opname:fixup = %s/%s' %(opname, offset)
for label, offsets in fixups.iteritems():
if not label:
continue
if label in self.clabels:
for offset in offsets:
self.final[offset] = self.clabels[label]
if (not self.final) or (self.final[-1] != stop):
self.final += [stop, ]
# print 'A1', self.backclabels
# print 'B1', self.clabels
if kw.get('verbose', False):
# print source
# print self.final
direct = False
# print '(',
for code in self.final:
if not direct:
name = self.name[code]
# print "'%s'," % (name),
if name in ('push', 'call', 'jmp'):
direct = True
else:
label = self.backclabels.get(code, None)
if offset is None:
# print label, "'#%d'" % (code),
pass
else:
# print "'#%d'," % (code),
pass
direct = False
# print ')'
# print 'A2', self.backclabels
# print 'B2', self.clabels
###########################################################################
def disassemble(self, **kw):
"""Function disassemble"""
verbose = kw.get('verbose', False)
if not verbose:
return
direct = False
# print self.data
# print self.label['data']
# print self.backclabels
print '#'*79
print '.data'
# print '#', self.data
nl = False
comma = ''
for offset, datum in enumerate(self.data):
if not datum:
continue
label = self.backdlabels.get(offset, None)
if label and label in self.label['data']:
if nl:
print
print '%-12s%+11.9f' % (label+':', float(datum)),
comma = ','
else:
print comma + ' %+11.9f' % (float(datum)),
comma = ','
nl = True
print
print '#'*79
print '.code'
# print '#', self.final
for offset, code in enumerate(self.final):
if direct:
clabel = self.backclabels.get(code, None)
if clabel:
print clabel
else:
print '#%d' % (code)
direct = False
else:
label = self.backclabels.get(offset, None)
name = self.name[code]
direct = (name in ('push', 'call', 'jmp'))
if label and label in self.label['code']:
print '%-12s%s' % (label+':', name),
else:
print ' %s' % (name),
if not direct:
print
print '.end'
print '#'*79
###########################################################################
def add_body(self, fmt, **kw):
"""Function add_body"""
cmt = '/*************************************************************/'
base = "__device__ int " + cmt + "\nRPN_%(name)s_RPN(Thep the) "
self.body += ((base + fmt) % kw) + '\n'
###########################################################################
def add_case(self, **kw):
"""Function add_case"""
k = {'number': self.index}
k.update(kw)
casefmt = "case %(number)d: error = RPN_%(name)s_RPN(&the); break;\n"
self.case += self.tab + casefmt % k
self.code[kw['name']] = self.index
self.add_name(kw['name'], self.index)
###########################################################################
def add_last(self):
"""Function add_last"""
self.index += 1
###########################################################################
def unary(self, **kw):
"""Function unary"""
self.add_case(**kw)
self.add_body("{ A_ %(name)s(A); return 0; }", **kw)
self.add_last()
###########################################################################
def binary(self, **kw):
"""Function binary"""
self.add_case(**kw)
self.add_body("{ AB %(name)s(A,B); return 0; }", **kw)
self.add_last()
###############################################################################
def CudaRPN(inPath, outPath, mycode, mydata, **kw):
"""CudaRPN implements the interface to the CUDA run environment.
"""
verbose = kw.get('verbose', False)
BLOCK_SIZE = 1024 # Kernel grid and block size
STACK_SIZE = 64
# OFFSETS = 64
# unary_operator_names = {'plus': '+', 'minus': '-'}
function = Function(
start=len(hardcase),
bss=64,
handcode=kw.get('handcode'))
with Timing('Total execution time'):
with Timing('Get and convert image data to gpu ready'):
im = Image.open(inPath)
px = array(im).astype(float32)
function.assemble(mycode, mydata, verbose=True)
function.disassemble(verbose=True)
cx = array(function.final).astype(int32)
dx = array(function.data).astype(float32)
with Timing('Allocate mem to gpu'):
d_px = mem_alloc(px.nbytes)
memcpy_htod(d_px, px)
d_cx = mem_alloc(cx.nbytes)
memcpy_htod(d_cx, cx)
d_dx = mem_alloc(dx.nbytes)
memcpy_htod(d_dx, dx)
with Timing('Kernel execution time'):
block = (BLOCK_SIZE, 1, 1)
checkSize = int32(im.size[0]*im.size[1])
grid = (int(im.size[0] * im.size[1] / BLOCK_SIZE) + 1, 1, 1)
kernel = INCLUDE + HEAD + function.body + convolve + TAIL
sourceCode = kernel % {
'pixelwidth': 3,
'stacksize': STACK_SIZE,
'case': function.case}
with open("RPN_sourceCode.c", "w") as target:
print>>target, sourceCode
module = SourceModule(sourceCode)
func = module.get_function("RPN")
func(d_px, d_cx, d_dx, checkSize, block=block, grid=grid)
with Timing('Get data from gpu and convert'):
RPNPx = empty_like(px)
memcpy_dtoh(RPNPx, d_px)
RPNPx = uint8(RPNPx)
with Timing('Save image time'):
pil_im = Image.fromarray(RPNPx, mode="RGB")
pil_im.save(outPath)
# Output final statistics
if verbose:
print '%40s: %s%s' % ('Target image', outPath, im.size)
print Timing.text
###############################################################################
INCLUDE = """// RPN_sourceCode.c
// GENERATED KERNEL IMPLEMENTING RPN ON CUDA
#include <math.h>
"""
HEAD = """
#define a_ float a = *--dstack; *dstack++ =
#define ab float a = *--dstack; float b = *--dstack; *dstack++ =
typedef struct _XY {
int x;
int y;
float n;
} XY, *XYp;
/************************** HANDCODE FUNCTIONS *******************************/
"""
handcode = {
'pop': "{ --dstack; }",
'quit': "{ stop = 1; }",
'noop': "{ }",
'invert': "{ a_ 1.0 - a; }",
'swap': """{
float a = *--dstack;
float b = *--dstack;
*++dstack = a;
*++dstack = b;
} """,
'push': "{ *dstack++ = data[code[ip++]]; }",
'add': "{ ab a + b; }",
'sub': "{ ab a - b; }",
'mul': "{ ab a * b; }",
'div': "{ ab a / b; }",
'call': """{
int to = code[ip++];
cstack[sp++] = ip;
ip = to;
} """,
'ret': "{ ip = cstack[--sp]; }",
'jmp': "{ ip = code[ip]; }",
}
hardcase = []
for i, (case, code) in enumerate(handcode.iteritems()):
hardcase += ['/* %s */ %s' % (case, code), ]
if 'stop' in code:
stop = i
HEAD += """
/************************** CUDA FUNCTIONS ***********************************/
"""
# Name header files and function signatures of linkable functions.
CUDA_sources = {
'/usr/local/cuda-5.5/targets/x86_64-linux/include/math_functions.h': [
'extern __host__ __device__ __device_builtin__ float',
'extern __device__ __device_builtin__ __cudart_builtin__ float',
'extern _CRTIMP __host__ __device__ __device_builtin__ float',
],
'/usr/local/cuda-5.5/targets/x86_64-linux/include/device_functions.h': [
# 'extern __device__ __device_builtin__ __cudart_builtin__ float',
'extern _CRTIMP __host__ __device__ __device_builtin__ float',
# 'extern __device__ __device_builtin__ float',
]
}
INCLUDE += '#include <%s>\n' % ('math_constants.h')
# Ingest header files to make use of linkable functions.
CUDA_constants = CUDAMathConstants()
hardcase += CUDA_constants.cases()
for filename, signatures in CUDA_sources.iteritems():
stars = max(2, 73 - len(filename))
pathname, twixt, basename = filename.partition('/include/')
INCLUDE += '#include <%s>\n' % (basename)
left = stars/2
right = stars - left
left, right = '*' * left, '*' * right
HEAD += '/*%s %s %s*/\n' % (left, filename, right)
for signature in signatures:
CUDA_functions = CUDAMathFunctions(
filename=filename,
signature=signature,
clip=True)
hardcase += CUDA_functions.cases()
###############################################################################
convolve = """
// data: the data field from which to convolve.
// kn: a length L array of coefficients (terminated by 0.0)
// kx: a length L array of x offsets
// ky: a length L array of y offsets
// X: width of data field (stride, not necessarily visible image width)
// Y: height of data field.
// C: color band (0, 1, or 2)
__device__ float planar_convolve(
float *data, float *kn, int *kx, int *ky, int X, int Y, int C)
{
float K = 0.0;
float V = 0.0;
int x0 = (threadIdx.x + blockIdx.x * blockDim.x);
int y0 = (threadIdx.y + blockIdx.y * blockDim.y);
int D = X * Y;
int N = 0;
float ki;
while((ki = *kn++) != 0.0) {
int xi = *kx++;
int yi = *ky++;
int x = (x0-xi);
int y = (y0-yi);
int d = C + (x + y * X) * 3;
if(d < 0 || d >= D) continue;
V += data[d];
K += ki;
N += 1;
};
if(N == 0) {
V = 0.0;
} else {
V /= K*N;
}
return V;
}
//__device__ void planar_ring_test(float *data, int C) {
// float kn[5] = { 1.0, 1.0, 1.0, 1.0 };
// int kx[5] = { +1, 0, -1, 0, 0 };
// int ky[5] = { 0, +1, 0, -1, 0 };
//}
"""
convolutionGPU = """
__global__ void convolutionGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH )
{
//////////////////////////////////////////////////////////////////////
// most slowest way to compute convolution
//////////////////////////////////////////////////////////////////////
// global mem address for this thread
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dataW +
blockIdx.y * blockDim.y * dataW;
float sum = 0;
float value = 0;
for (int i = -KERNEL_RADIUS; i <= KERNEL_RADIUS; i++) // row wise
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) // col wise
{
// check row first
if (blockIdx x == 0 && (threadIdx x + i) < 0) // left apron
value = 0;
else if ( blockIdx x == (gridDim x - 1) &&
(threadIdx x + i) > blockDim x-1 ) // right apron
value = 0;
else
{
// check col next
if (blockIdx y == 0 && (threadIdx y + j) < 0) // top apron
value = 0;
else if ( blockIdx y == (gridDim y - 1) &&
(threadIdx y + j) > blockDim y-1 ) // bottom apron
value = 0;
else // safe case
value = d_Data[gLoc + i + j * dataW];
}
sum += value *
d_Kernel[KERNEL_RADIUS + i] *
d_Kernel[KERNEL_RADIUS + j];
}
d_Result[gLoc] = sum;
}
"""
###############################################################################
TAIL = """
__device__ int machine(int *code, float *data, float *value) {
const float numerator = 255.0;
const float denominator = 1.0 / numerator;
float DSTACK[%(stacksize)d];
int CSTACK[%(stacksize)d];
int opcode;
int error = 0;
int *cstack = &CSTACK[0];
float *dstack = &DSTACK[0];
int ip = 0, sp = 0, stop = 0;
*dstack++ = *value * denominator;
*value = 0.0;
while((!stop) && (opcode = code[ip++]) != 0) {
switch(opcode) {
"""
for i, case in enumerate(hardcase):
TAIL += ' '*12
TAIL += 'case %3d: %-49s; break;\n' % (i, case)
TAIL += """
%(case)s
default: error = opcode; break;
}
stop |= !!error;
}
if(error) {
*value = float(error);
} else {
*value = *--dstack * numerator;
}
return error;
}
__global__ void RPN( float *inIm, int *code, float *data, int check ) {
const int pw = %(pixelwidth)s;
const int idx = (threadIdx.x ) + blockDim.x * blockIdx.x ;
if(idx * pw < check * pw) {
const int offset = idx * pw;
int error = 0;
int c;
for(c=0; c<pw && !error; ++c) {
error += machine(code, data, inIm + offset + c);
}
}
}
"""
###############################################################################
if __name__ == "__main__":
Banner(arg=[argv[0] + ': main', ], bare=True)
if len(argv) == 1:
Banner(arg=[argv[0] + ': default code and data', ], bare=True)
DATA = [0.0, 1.0]
CODE = [
'push', '#1',
'sub',
'noop',
'call', 'here',
'quit',
'here:ret', ]
else:
Banner(arg=[argv[0] + ': code and data from file: ', ], bare=True)
DATA = []
CODE = []
STATE = 0
with open(argv[1]) as source:
for number, line in enumerate(source):
line = line.strip()
if STATE == 0:
if line.startswith('#'):
# print number, 'comment'
continue
elif line.startswith('.data'):
# print number, 'keyword .data'
STATE = 1
else:
assert False, '.data section must come first'
elif STATE == 1:
if line.startswith('#'):
# print number, 'comment'
continue
line = re.sub(r':\s+', ':', line)
if line.startswith('.code'):
# print number, 'keyword .code'
STATE = 2
else:
# print number, 'add data'
DATA += re.split(r'\s+', line)
elif STATE == 2:
if line.startswith('#'):
# print number, 'comment'
continue
line = re.sub(r':\s+', ':', line)
# print number, 'add code'
CODE += re.split(r'\s+', line)
# print '.data\n', '\n'.join([str(datum) for datum in data])
# print '.code\n', '\n'.join(code)
Banner(arg=[argv[0] + ': run in CUDA', ], bare=True)
CudaRPN(
'img/source.png',
'img/target.png',
CODE,
DATA,
handcode=handcode
)
###############################################################################
| gpl-3.0 | 6,278,133,493,561,412,000 | 34.537549 | 79 | 0.412449 | false | 4.151608 | false | false | false |
HossainKhademian/FreeCivAndroid | lib/freeciv/diplodialog.py | 4 | 4811 | # Copyright (C) 2011 Michal Zielinski ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import client.diplomacy
from client.diplomacy import (CLAUSE_ADVANCE, CLAUSE_GOLD, CLAUSE_MAP,
CLAUSE_SEAMAP, CLAUSE_CITY,
CLAUSE_CEASEFIRE, CLAUSE_PEACE, CLAUSE_ALLIANCE,
CLAUSE_VISION, CLAUSE_EMBASSY,
DS_WAR, DS_ARMISTICE, DS_CEASEFIRE, DS_ALLIANCE, DS_PEACE)
import ui
class Meeting(client.diplomacy.Meeting):
def init(self):
self.dialog = None
self.open_dialog()
def create_clause(self, giver, type, value):
self.open_dialog()
self.dialog.add_clause(giver, type, value)
print 'create_clause', giver, type, value
def remove_clause(self, giver, type, value):
print 'remove_clause', giver, type, value
def accept_treaty(self, me, other):
print 'accept_treaty', me, other
self.open_dialog()
self.dialog.set_accept_treaty(me, other)
def open_dialog(self):
if not self.dialog:
self.dialog = MeetingDialog(self)
ui.set_dialog(self.dialog, scroll=True)
class MeetingDialog(ui.LinearLayoutWidget):
def __init__(self, meeting):
super(MeetingDialog, self).__init__()
self.meeting = meeting
self.left = ConditionsWidget(meeting.client.get_playing())
self.right = ConditionsWidget(meeting.counterpart)
c = meeting.counterpart
self.top = ui.HorizontalLayoutWidget()
# Sir!, the %s ambassador has arrived \nWhat are your wishes?
self.top.add(ui.Label('Meeting with '))
self.top.add(ui.Label(' ', image=c.get_flag()))
self.top.add(ui.Label(' %s (%s)' % (c.get_nation_pl(), c.get_name())))
self.add(self.top)
self.middle = ui.HorizontalLayoutWidget(spacing=10)
w = 200
self.middle.add(ui.Bordered(self.left, force_width=w))
self.middle.add(ui.Bordered(self.right, force_width=w))
self.add(self.middle)
self.add(ui.Button('Add condition', self.add_condition))
self.bottom = ui.HorizontalLayoutWidget(spacing=10)
self.bottom.add(ui.Button('Cancel treaty', self.cancel_treaty))
self.bottom.add(ui.Button('Accept treaty', self.accept_treaty))
self.add(self.bottom)
def cancel_treaty(self):
self.meeting.cancel()
ui.back()
def accept_treaty(self):
self.meeting.accept()
def add_condition(self):
def ph(type): # pact handler
def handler():
ui.back()
self.meeting.pact(type)
return handler
panel = ui.LinearLayoutWidget()
c = self.meeting.counterpart
state = c.get_state()
if state not in (DS_ARMISTICE, DS_CEASEFIRE, DS_PEACE, DS_ALLIANCE):
panel.add(ui.Button('Ceasefire', ph(CLAUSE_CEASEFIRE)))
if state not in (DS_PEACE, DS_ALLIANCE):
panel.add(ui.Button('Peace', ph(CLAUSE_PEACE)))
if state not in (DS_ALLIANCE, ):
panel.add(ui.Button('Alliance', ph(CLAUSE_ALLIANCE)))
if not c.gives_shared_vision():
panel.add(ui.Button('Shared vision', ph(CLAUSE_VISION)))
ui.set_dialog(panel)
def add_clause(self, giver, type, value):
if giver == self.meeting.counterpart:
panel = self.right
else:
panel = self.left
panel.add_condition(type, value, self.meeting.get_clause_repr(type, value))
def set_accept_treaty(self, me, other):
self.left.set_accept(me)
self.right.set_accept(other)
class ConditionsWidget(ui.LinearLayoutWidget):
def __init__(self, player):
super(ConditionsWidget, self).__init__()
p = ui.HorizontalLayoutWidget()
p.add(ui.Spacing(10, 0))
p.add(ui.Label(' ', image=player.get_flag()))
p.add(ui.Spacing(10, 0))
self.accepting = ui.Label('?')
p.add(self.accepting)
self.add(p)
self.panel = ui.LinearLayoutWidget()
self.add(self.panel)
def add_condition(self, type, value, string):
self.panel.add(ui.Label(string))
def set_accept(self, b):
if b:
self.accepting.set_text('Accepts')
else:
self.accepting.set_text('Declines')
if __name__ == '__main__':
d = MeetingDialog()
ui.set_dialog(d)
ui.main() | gpl-2.0 | -3,065,350,190,407,030,000 | 32.416667 | 83 | 0.624818 | false | 3.336338 | false | false | false |
smousavi05/EQTransformer | tests/test_downloader.py | 1 | 1620 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 25 22:34:05 2020
@author: mostafamousavi
"""
from EQTransformer.utils.downloader import downloadMseeds, makeStationList, downloadSacs
import pytest
import glob
import os
def test_downloader():
makeStationList(client_list=["SCEDC"],
min_lat=35.50,
max_lat=35.60,
min_lon=-117.80,
max_lon=-117.40,
start_time="2019-09-01 00:00:00.00",
end_time="2019-09-03 00:00:00.00",
channel_list=["HH[ZNE]", "HH[Z21]", "BH[ZNE]", "EH[ZNE]", "SH[ZNE]", "HN[ZNE]", "HN[Z21]", "DP[ZNE]"],
filter_network=["SY"],
filter_station=[])
downloadMseeds(client_list=["SCEDC", "IRIS"],
stations_json='station_list.json',
output_dir="downloads_mseeds",
start_time="2019-09-01 00:00:00.00",
end_time="2019-09-02 00:00:00.00",
min_lat=35.50,
max_lat=35.60,
min_lon=-117.80,
max_lon=-117.40,
chunck_size=1,
channel_list=[],
n_processor=2)
dir_list = [ev for ev in os.listdir('.')]
if ('downloads_mseeds' in dir_list) and ('station_list.json' in dir_list):
successful = True
else:
successful = False
assert successful == True
def test_mseeds():
mseeds = glob.glob("downloads_mseeds/CA06/*.mseed")
assert len(mseeds) > 0
| mit | 5,332,259,601,145,437,000 | 27.421053 | 120 | 0.495679 | false | 3.396226 | false | false | false |
alexei-matveev/ccp1gui | jobmanager/__init__.py | 1 | 1565 | #
# This file is part of the CCP1 Graphical User Interface (ccp1gui)
#
# (C) 2002-2005 CCLRC Daresbury Laboratory
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Job manager and editor Tkinter interface to control jobs
"""
import sys
import jobmanager.job
import jobmanager.ccp1gui_subprocess
import jobmanager.jobeditor
import jobmanager.jobthread
if sys.platform[:3] == 'win':
import jobmanager.winprocess
# Constants
#
# jmht - don't think these are used anywhere?
#MODIFIED = "Modified"
#SUBMITTED = "Submitted"
#RUNNING = "Running"
#KILLED = "Killed"
#DONE = "Done"
#STOPPED = "Stopped"
class JobManager:
def __init__(self):
self.registered_jobs = []
def RegisterJob(self,job):
if job not in self.registered_jobs:
self.registered_jobs.append(job)
def RemoveJob(self,job):
self.registered_jobs.remove(job)
| gpl-2.0 | -8,558,338,639,613,817,000 | 29.096154 | 72 | 0.704792 | false | 3.532731 | false | false | false |
akurtakov/Pydev | plugins/com.python.pydev.docs/build_homepage.py | 1 | 7928 | import datetime
import os
import shutil
import sys
import build_python_code_block
args = sys.argv[1:]
this_script_path = sys.argv[0]
this_script_dir = os.path.split(this_script_path)[0]
CURRENT_DATE = datetime.datetime.now()
# CURRENT_DATE = datetime.datetime(2017, 9, 20)
update_site_versions = [
'6.3.2',
'6.3.1',
'6.3.0',
'6.2.0',
'6.1.0',
'6.0.0',
'5.9.2',
'5.9.1',
'5.9.0',
'5.8.0',
'5.7.0',
'5.6.0',
'5.5.0',
'5.4.0',
'5.3.1',
'5.3.0',
'5.2.0',
'5.1.2',
'5.1.1',
'5.0.0',
'4.5.5',
'4.5.4',
'4.5.3',
'4.5.1',
'4.5.0',
'old',
]
LAST_VERSION_TAG = update_site_versions[0]
DEFAULT_CONTENTS_TEMPLATE = '''<doc>
<contents_area></contents_area>
%s
</doc>
'''
DEFAULT_AREAS = '''
<right_area>
</right_area>
<image_area></image_area>
<quote_area></quote_area>
'''
DEFAULT_AREAS_MANUAL = '''
<right_area>
</right_area>
<image_area>manual.png</image_area>
<quote_area></quote_area>
'''
#=======================================================================================================================
# BuildFromRst
#=======================================================================================================================
def BuildFromRst(source_filename, is_new_homepage=False):
print source_filename
import os
from docutils import core
# dict of default settings to override (same as in the cmdline params, but as attribute names:
# "--embed-stylesheet" => "embed_stylesheet"
settings_overrides = {}
import os
# publish as html
ret = core.publish_file(
writer_name='html',
source_path=source_filename,
destination_path=os.tempnam(),
settings_overrides=settings_overrides,
)
final = ret[ret.find('<body>') + 6: ret.find('</body>')].strip()
if final.startswith('<div'):
final = final[final.find('\n'):]
final = final[:final.rfind('</div>')]
rst_contents = open(source_filename, 'r').read()
if rst_contents.startswith('..'):
image_area_right_area_and_quote_area = ''
# lines = []
# for line in rst_contents.splitlines():
# if line.strip().startswith('..'):
# lines.append(line.strip()[2:].strip())
# lines = lines[1:] #remove the first (empty) line
# image_area_right_area_and_quote_area = '\n'.join(lines)
else:
if rst_contents.startswith('manual_adv'):
image_area_right_area_and_quote_area = DEFAULT_AREAS
else:
image_area_right_area_and_quote_area = DEFAULT_AREAS_MANUAL
name = source_filename.split('.')[0]
if is_new_homepage:
if os.path.exists(name + '.contents.htm'):
raise AssertionError('This file should not exist: ' + name + '.contents.htm')
if os.path.exists(name + '.contents.html'):
raise AssertionError('This file should not exist: ' + name + '.contents.html')
contents = DEFAULT_CONTENTS_TEMPLATE % (image_area_right_area_and_quote_area,)
final = contents.replace('<contents_area></contents_area>', '<contents_area>%s</contents_area>' % final)
final = final.replace('\r\n', '\n').replace('\r', '\n')
f = open(name + '.contents.rst_html', 'wb')
print >> f, final
f.close()
COMPOSITE_CONTENT = '''<?xml version='1.0' encoding='UTF-8'?>
<?compositeMetadataRepository version='1.0.0'?>
<repository name='"Eclipse Project Test Site"'
type='org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository' version='1.0.0'>
<properties size='1'>
<property name='p2.timestamp' value='{timestamp}'/>
</properties>
<children size='1'>
<child location='https://dl.bintray.com/fabioz/pydev/{version}'/>
</children>
</repository>
'''
COMPOSITE_ARTIFACTS = '''<?xml version='1.0' encoding='UTF-8'?>
<?compositeArtifactRepository version='1.0.0'?>
<repository name='"Eclipse Project Test Site"'
type='org.eclipse.equinox.internal.p2.artifact.repository.CompositeArtifactRepository' version='1.0.0'>
<properties size='1'>
<property name='p2.timestamp' value='{timestamp}'/>
</properties>
<children size='3'>
<child location='https://dl.bintray.com/fabioz/pydev/{version}'/>
</children>
</repository>
'''
INDEX_CONTENTS = '''<!DOCTYPE html>
<html>
<head></head>
<body>PyDev update site aggregator.<br>
<br>
Bundles the following PyDev update site(s):<br>
<br>
<a href="https://dl.bintray.com/fabioz/pydev/{version}">https://dl.bintray.com/fabioz/pydev/{version}</a><br>
</body>
</html>
'''
#=======================================================================================================================
# GenerateRstInDir
#=======================================================================================================================
def GenerateRstInDir(d, is_new_homepage=False):
for f in os.listdir(d):
if f.endswith('.rst'):
BuildFromRst(f, is_new_homepage)
if __name__ == '__main__':
this_script_dir = os.path.realpath(os.path.abspath(this_script_dir))
print 'Directory with this script:', this_script_dir
print 'Generating rst for homepage'
os.chdir(os.path.join(this_script_dir, 'homepage'))
# Copy the update site redirections
shutil.rmtree(os.path.join('final', 'updates'), ignore_errors=True)
shutil.copytree('updates', os.path.join('final', 'updates'))
shutil.rmtree(os.path.join('final', 'nightly'), ignore_errors=True)
shutil.copytree('nightly', os.path.join('final', 'nightly'))
import time
timestamp = str(int(time.time()))
def make_update_site_at_dir(directory, version, force):
try:
os.mkdir(directory)
except:
pass
xml1 = os.path.join(directory, 'compositeArtifacts.xml')
if force or not os.path.exists(xml1):
with open(xml1, 'w') as stream:
stream.write(COMPOSITE_ARTIFACTS.replace('{version}', version).replace('{timestamp}', timestamp))
xml2 = os.path.join(directory, 'compositeContent.xml')
if force or not os.path.exists(xml2):
with open(xml2, 'w') as stream:
stream.write(COMPOSITE_CONTENT.replace('{version}', version).replace('{timestamp}', timestamp))
html = os.path.join(directory, 'index.html')
if force or not os.path.exists(html):
with open(html, 'w') as stream:
stream.write(INDEX_CONTENTS.replace('{version}', version).replace('{timestamp}', timestamp))
make_update_site_at_dir(os.path.join('final', 'updates'), LAST_VERSION_TAG, force=True)
make_update_site_at_dir(os.path.join('final', 'nightly'), LAST_VERSION_TAG, force=True)
for update_site_version in update_site_versions:
make_update_site_at_dir(os.path.join('final', 'update_sites', update_site_version), update_site_version, force=False)
shutil.copyfile('stylesheet.css', os.path.join('final', 'stylesheet.css'))
shutil.copyfile('favicon.ico', os.path.join('final', 'favicon.ico'))
shutil.copyfile('pydev_certificate.cer', os.path.join('final', 'pydev_certificate.cer'))
shutil.copyfile('video_pydev_20.html', os.path.join('final', 'video_pydev_20.html'))
shutil.copyfile('video_swfobject.js', os.path.join('final', 'video_swfobject.js'))
GenerateRstInDir('.', True)
sys.path.insert(0, os.path.join(this_script_dir, 'homepage', 'scripts'))
sys.path.insert(0, '.')
# print 'PYTHONPATH changed. Using:'
# for p in sys.path:
# print ' - ', p
os.chdir(os.path.join(this_script_dir, 'homepage', 'scripts'))
import build_merged # @UnresolvedImport
os.chdir(os.path.join(this_script_dir, 'homepage'))
build_merged.LAST_VERSION_TAG = LAST_VERSION_TAG
build_merged.CURRENT_DATE = CURRENT_DATE
build_merged.DoIt()
sys.stdout.write('Finished\n')
| epl-1.0 | 8,646,195,774,528,780,000 | 30.460317 | 125 | 0.589556 | false | 3.349387 | false | false | false |
electricface/deepin-notifications | src/dbus_notify.py | 1 | 6534 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Hou Shaohui
#
# Author: Hou Shaohui <[email protected]>
# Maintainer: Hou Shaohui <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import dbus
from dbus_utils import DBusProperty, DBusIntrospectable, type_convert
from events import event_manager
from common import Storage
REASON_EXPIRED = 1 # The notification expired.
REASON_DISMISSED = 2 # The notification was dismissed by the user.
REASON_CLOSED = 3 # The notification was closed by a call to CloseNotification.
REASON_UNDEFINED = 4 # Undefined/reserved reasons.
SERVER_CAPABILITIES = [
"action-icons", # Supports using icons instead of text for displaying actions.
"actions", # The server will provide the specified actions to the user.
"body", # Supports body text.
"body-hyperlinks", # The server supports hyperlinks in the notifications.
"body-images", # The server supports images in the notifications.
"body-markup", # Supports markup in the body text.
"icon-multi", # The server will render an animation of all the frames in a given image array.
"icon-static", # Supports display of exactly 1 frame of any given image array.
"persistence", # The server supports persistence of notifications.
"sound", # The server supports sounds on notifications .
]
DEFAULT_STANDARD_HINST = Storage({
"action-icons" : False, # The icon name should be compliant with the Freedesktop.org Icon Naming Specification.
"category" : "", # The type of notification this is.
"desktop-entry" : "", # This specifies the name of the desktop filename representing the calling program.
"image-data" : "", # This is a raw data image format.
"image-path" : "", # Alternative way to define the notification image.
"resident" : False, # This hint is likely only useful when the server has the "persistence" capability.
"sound-file" : "", # The path to a sound file to play when the notification pops up.
"sound-name" : "", # A themeable named sound from the freedesktop.org sound naming specification.
"suppress-sound" : False, # Causes the server to suppress playing any sounds, if it has that ability.
"transient" : False,
"x" : None,
"y" : None,
"urgency" : 1 # 0 Low, 1 Normal, 2 Critical
})
class Notifications(DBusProperty, DBusIntrospectable, dbus.service.Object):
BUS_NAME = "org.freedesktop.Notifications"
PATH = "/org/freedesktop/Notifications"
NOTIFY_IFACE = "org.freedesktop.Notifications"
NOTIFY_ISPEC = """
<method name="CloseNotification">
<arg direction="in" name="id" type="u"/>
</method>
<method name="GetCapabilities">
<arg direction="out" name="caps" type="as"/>
</method>
<method name="GetServerInformation">
<arg direction="out" name="name" type="s"/>
<arg direction="out" name="vendor" type="s"/>
<arg direction="out" name="version" type="s"/>
<arg direction="out" name="spec_version" type="s"/>
</method>
<method name="Notify">
<arg direction="in" name="app_name" type="s" />
<arg direction="in" name="id" type="u" />
<arg direction="in" name="icon" type="s" />
<arg direction="in" name="summary" type="s" />
<arg direction="in" name="body" type="s" />
<arg direction="in" name="actions" type="as" />
<arg direction="in" name="hints" type="a{sv}" />
<arg direction="in" name="timeout" type="i" />
<arg direction="out" name="id" type="u" />
</method>
<signal name="NotificationClosed">
<arg name="id" type="u" />
<arg name="reason" type="u" />
</signal>
<signal name="ActionInvoked">
<arg name="id" type="u" />
<arg name="action_key" type="s" />
</signal>
"""
def __init__(self):
DBusIntrospectable.__init__(self)
DBusProperty.__init__(self)
self.set_introspection(self.NOTIFY_IFACE, self.NOTIFY_ISPEC)
bus = dbus.SessionBus()
name = dbus.service.BusName(self.BUS_NAME, bus)
dbus.service.Object.__init__(self, bus, self.PATH, name)
self.id_cursor = long(0)
@dbus.service.method(NOTIFY_IFACE, in_signature="u")
def CloseNotification(self, replaces_id):
return replaces_id
@dbus.service.method(NOTIFY_IFACE, out_signature="as")
def GetCapabilities(self):
return SERVER_CAPABILITIES
@dbus.service.method(NOTIFY_IFACE, out_signature="ssss")
def GetServerInformation(self):
return "Notifications", "LinuxDeepin", "0.1", "1.2"
@dbus.service.method(NOTIFY_IFACE, in_signature="susssasa{sv}i", out_signature="u")
def Notify(self, app_name, replaces_id, app_icon, summary, body, actions, hints, timeout):
notify_storage = Storage({"app_name" : type_convert.dbus2py(app_name),
"replaces_id" : type_convert.dbus2py(replaces_id),
"app_icon" : type_convert.dbus2py(app_icon),
"summary" : type_convert.dbus2py(summary),
"body" : type_convert.dbus2py(body),
"actions" : type_convert.dbus2py(actions),
"hints" : type_convert.dbus2py(hints),
"expire_timeout" : type_convert.dbus2py(timeout)})
event_manager.emit("notify", notify_storage)
if replaces_id:
notify_storage.id = replaces_id
else:
self.id_cursor += 1
notify_storage.id = self.id_cursor
return notify_storage.id
@dbus.service.signal(NOTIFY_IFACE, signature='uu')
def NotificationClosed(self, id, reason):
pass
@dbus.service.signal(NOTIFY_IFACE, signature='us')
def ActionInvoked(self, id, action_key):
print id, action_key
| gpl-3.0 | -1,757,554,600,149,854,000 | 41.154839 | 115 | 0.640496 | false | 3.67285 | false | false | false |
wursm1/eurobot-hauptsteuerung | eurobot/hauptsteuerung/robot.py | 1 | 3766 | """
This module contains classes that hold the position information of the robots
"""
__author__ = 'Wuersch Marcel'
__license__ = "GPLv3"
import time
import threading
from libraries.can import MsgSender
import numpy as np
class RobotPosition():
""" parent class for PositionMyRobot and PositionOtherRobot
The objects of this class wait for position information over CAN and save them.
They also draw a map where the robot has been on the table.
"""
def __init__(self, can_socket, msg_type, size):
self.size = size
self.position = (0, 0)
self.angle = 0
self.lock = threading.Lock()
resolution = 200
table_size = 2000
self.map = np.zeros((resolution*1.5+1, resolution+1))
self.scale = table_size / resolution
self.last_position_update = 0
self.last_angle_update = 0
self.new_position_data = []
can_socket.create_interrupt(msg_type, self.can_robot_position)
def get_new_position_lock(self):
""" returns a lock which gets released each time new position information is received.
:return: lock
"""
lock = threading.Lock()
self.new_position_data.append(lock)
return lock
def can_robot_position(self, can_msg):
""" waits for new position information, saves them and puts them in the map """
margin = int(200 / self.scale) # minimum distance to an object
# TODO: check sender ID (in case drive and navigation both send)
if can_msg['position_correct'] and can_msg['sender'] == MsgSender.Navigation.value:
x, y = can_msg['x_position'], can_msg['y_position']
with self.lock:
self.position = x, y
self.map[round(x / self.scale) - margin: round(x / self.scale) + margin,
round(y / self.scale) - margin: round(y / self.scale) + margin] += 1
for lock in self.new_position_data: # release all locks
lock.acquire(False)
lock.release()
self.last_position_update = time.time()
if can_msg['angle_correct']:
with self.lock:
self.angle = can_msg['angle'] / 100
self.last_angle_update = time.time()
def get_position(self):
"""
:return: position of the robot (x, y)
"""
with self.lock:
return self.position
def get_angle(self):
"""
:return: angle of the robot
"""
with self.lock:
return self.angle
def get_map(self):
"""
:return: map where the robot has been
"""
with self.lock:
return self.map
class PositionMyRobot(RobotPosition):
""" Holds the position information of the robot on which the program is running. """
def __init__(self, can_socket, msg_type, name, size=20):
super().__init__(can_socket, msg_type, size)
self.name = name
class PositionOtherRobot(RobotPosition):
""" Holds the position information of all other robots. """
def __init__(self, can_socket, msg_type, size=20):
super().__init__(can_socket, msg_type, size)
self.check_thread = threading.Thread(target=self.check_navigation)
self.check_thread.setDaemon(1)
self.check_thread.start()
def check_navigation(self):
""" checks if the position information of the navigation system is to old """
while True:
now = time.time()
if now - self.last_position_update > 0.5:
self.angle = None
if now - self.last_angle_update > 0.5:
self.position = None
time.sleep(0.5) # TODO: set correct time | gpl-3.0 | 4,258,096,413,590,043,000 | 34.87619 | 94 | 0.586298 | false | 4.027807 | false | false | false |
phovea/phovea_server | phovea_server/dataset.py | 1 | 2778 | ###############################################################################
# Caleydo - Visualization for Molecular Biology - http://caleydo.org
# Copyright (c) The Caleydo Team. All rights reserved.
# Licensed under the new BSD license, available at http://caleydo.org/license
###############################################################################
from builtins import str
import phovea_server.plugin
import phovea_server.range
import phovea_server.util
from phovea_server.dataset_def import to_idtype_description
import itertools
_providers_r = None
def _providers():
global _providers_r
if _providers_r is None:
_providers_r = [p.load().factory() for p in phovea_server.plugin.list('dataset-provider')]
return _providers_r
def iter():
"""
an iterator of all known datasets
:return:
"""
return itertools.chain(*_providers())
def list_datasets():
"""
list all known datasets
:return:
"""
return list(iter())
def get(dataset_id):
"""
:param dataset_id:
:return: returns the selected dataset identified by id
"""
for p in _providers():
r = p[dataset_id]
if r is not None:
return r
return None
def add(desc, files=[], id=None):
"""
adds a new dataset to this storage
:param desc: the dict description information
:param files: a list of FileStorage
:param id: optional the unique id to use
:return: the newly created dataset or None if an error occurred
"""
for p in _providers():
r = p.upload(desc, files, id)
if r:
return r
return None
def update(dataset, desc, files=[]):
"""
updates the given dataset
:param dataset: a dataset or a dataset id
:param desc: the dict description information
:param files: a list of FileStorage
:return:
"""
old = get(dataset) if isinstance(dataset, str) else dataset
if old is None:
return add(desc, files)
r = old.update(desc, files)
return r
def remove(dataset):
"""
removes the given dataset
:param dataset: a dataset or a dataset id
:return: boolean whether the operation was successful
"""
old = get(dataset) if isinstance(dataset, str) else dataset
if old is None:
return False
for p in _providers():
if p.remove(old):
return True
return False
def list_idtypes():
tmp = dict()
for d in list_datasets():
for idtype in d.to_idtype_descriptions():
tmp[idtype['id']] = idtype
# also include the known elements from the mapping graph
mapping = get_mappingmanager()
for idtype_id in mapping.known_idtypes():
tmp[idtype_id] = to_idtype_description(idtype_id)
return list(tmp.values())
def get_idmanager():
return phovea_server.plugin.lookup('idmanager')
def get_mappingmanager():
return phovea_server.plugin.lookup('mappingmanager')
| bsd-3-clause | 6,400,642,323,520,684,000 | 22.948276 | 94 | 0.649748 | false | 3.754054 | false | false | false |
ozamiatin/oslo.messaging | oslo_messaging/_drivers/zmq_driver/poller/threading_poller.py | 2 | 2911 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_poller
zmq = zmq_async.import_zmq()
LOG = logging.getLogger(__name__)
class ThreadingPoller(zmq_poller.ZmqPoller):
def __init__(self):
self.poller = zmq.Poller()
self.sockets_and_recv_methods = {}
def register(self, socket, recv_method=None):
socket_handle = socket.handle
if socket_handle in self.sockets_and_recv_methods:
return
LOG.debug("Registering socket %s", socket_handle.identity)
self.sockets_and_recv_methods[socket_handle] = (socket, recv_method)
self.poller.register(socket_handle, zmq.POLLIN)
def unregister(self, socket):
socket_handle = socket.handle
socket_and_recv_method = \
self.sockets_and_recv_methods.pop(socket_handle, None)
if socket_and_recv_method:
LOG.debug("Unregistering socket %s", socket_handle.identity)
self.poller.unregister(socket_handle)
def poll(self, timeout=None):
if timeout is not None and timeout > 0:
timeout *= 1000 # convert seconds to milliseconds
socket_handles = {}
try:
socket_handles = dict(self.poller.poll(timeout=timeout))
except zmq.ZMQError as e:
LOG.debug("Polling terminated with error: %s", e)
if not socket_handles:
return None, None
for socket_handle in socket_handles:
socket, recv_method = self.sockets_and_recv_methods[socket_handle]
if recv_method:
return recv_method(socket), socket
else:
return socket.recv_multipart(), socket
def close(self):
pass # Nothing to do for threading poller
class ThreadingExecutor(zmq_poller.Executor):
def __init__(self, method):
self._method = method
thread = threading.Thread(target=self._loop)
thread.daemon = True
super(ThreadingExecutor, self).__init__(thread)
self._stop = threading.Event()
def _loop(self):
while not self._stop.is_set():
self._method()
def execute(self):
self.thread.start()
def stop(self):
self._stop.set()
| apache-2.0 | -95,809,660,102,179,120 | 32.079545 | 78 | 0.642391 | false | 4.048679 | false | false | false |
mass-project/mass_server | mass_flask_webui/forms/submit.py | 1 | 2232 | from flask_wtf import Form
from flask_wtf.file import FileField, FileRequired
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import InputRequired, IPAddress, URL
from mass_flask_core.models import TLPLevelField
class FileSampleSubmitForm(Form):
file = FileField('File', validators=[FileRequired()])
tlp_level = SelectField('Sample privacy (TLP level)', coerce=int, choices=[
(TLPLevelField.TLP_LEVEL_WHITE, 'WHITE (unlimited)'),
(TLPLevelField.TLP_LEVEL_GREEN, 'GREEN (community)'),
(TLPLevelField.TLP_LEVEL_AMBER, 'AMBER (limited distribution)'),
(TLPLevelField.TLP_LEVEL_RED, 'RED (personal for named recipients)'),
])
submit = SubmitField()
class IPSampleSubmitForm(Form):
ip_address = StringField('IPv4/IPv6 address', validators=[InputRequired(), IPAddress()])
tlp_level = SelectField('Sample privacy (TLP level)', coerce=int, choices=[
(TLPLevelField.TLP_LEVEL_WHITE, 'WHITE (unlimited)'),
(TLPLevelField.TLP_LEVEL_GREEN, 'GREEN (community)'),
(TLPLevelField.TLP_LEVEL_AMBER, 'AMBER (limited distribution)'),
(TLPLevelField.TLP_LEVEL_RED, 'RED (personal for named recipients)'),
])
submit = SubmitField()
class DomainSampleSubmitForm(Form):
domain = StringField('Domain name', validators=[InputRequired()])
tlp_level = SelectField('Sample privacy (TLP level)', coerce=int, choices=[
(TLPLevelField.TLP_LEVEL_WHITE, 'WHITE (unlimited)'),
(TLPLevelField.TLP_LEVEL_GREEN, 'GREEN (community)'),
(TLPLevelField.TLP_LEVEL_AMBER, 'AMBER (limited distribution)'),
(TLPLevelField.TLP_LEVEL_RED, 'RED (personal for named recipients)'),
])
submit = SubmitField()
class URISampleSubmitForm(Form):
uri = StringField('URI', validators=[InputRequired(), URL()])
tlp_level = SelectField('Sample privacy (TLP level)', coerce=int, choices=[
(TLPLevelField.TLP_LEVEL_WHITE, 'WHITE (unlimited)'),
(TLPLevelField.TLP_LEVEL_GREEN, 'GREEN (community)'),
(TLPLevelField.TLP_LEVEL_AMBER, 'AMBER (limited distribution)'),
(TLPLevelField.TLP_LEVEL_RED, 'RED (personal for named recipients)'),
])
submit = SubmitField()
| mit | -9,076,948,158,519,788,000 | 44.55102 | 92 | 0.6931 | false | 3.588424 | false | false | false |
ABorgna/BluePoV-PC | bluePoV.py | 1 | 6233 | #
# Banana banana banana
#
# ~ ABorgna
#
#
# Explanation in 'README-esp.txt'.
#
#
# Packets are lists with n items, n >= 2;
#   1st value:  FUNC - int
#   following:  DATA - int || list || NumpyArray_t
# Includes
import constants as const
from sockets import *
from transmitter import *
from pygame import Surface,surfarray
import numpy as np
#from numpy import ndarray as NumpyArray_t
from sys import stderr
class ResponseError(Exception):
pass
class BadResponseError(ResponseError):
pass
class NullResponseError(ResponseError):
pass
class Driver(object):
"""
Interface with the POV display
"""
def __init__(self, socket, res, depth=1):
super(Driver, self).__init__()
# Variables
self.resolution = res
self.depth = depth
# The array buffer
if res[1] % 8:
raise ValueError("The display height must be a multiple of 8")
# Image buffer, the data to transmit
self.buffer = np.empty((res[0],res[1],3),dtype=np.uint8)
# Creates the transmitter and connects with the device
self.transmitter = Transmitter(socket)
self.transmitter.start()
# Set the resolution on the device
#self.setTotalWidth(res[0])
self.setResolution(res)
self.setDepth(depth)
self.setDim(0)
        # Go
        self.syncro()
def _send(self,packet,errorStr="Transmission error",retries=0):
"""
Sends the packet
and checks the response for error codes (0xff00-0xfffe)
Response:
>= 0 - Response
< 0 - Error
None - No response
"""
if retries >= 0:
retries += 1
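        # A negative retries value never counts down to zero, so it effectively
        # means "retry forever".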
while retries:
retries -= 1
self.transmitter.send(packet)
r = self.transmitter.recv()
if r == None:
if not retries:
stderr.write(errorStr+", couldn't get response\n")
return None
elif 0xffff > r >= 0xff00:
stderr.write(errorStr+", {:#x}\n".format(r))
return -r
else:
return r
def _send_noRcv(self,packet):
"""
Sends the packet,
doesn't wait for the operation to finish
"""
self.transmitter.send(packet)
# Special commands
def ping(self):
r = self._send((const.PING|const.GET,),"Error when pinging")
return r != None
def syncro(self):
        self._send((const.STORE|const.SET,),"Error: Synchronization went bad :(")
def clean(self):
self._send((const.CLEAN|const.SET,),"Error cleaning the display")
# Variable setters
def setResolution(self,res):
if res[1] % 8:
raise ValueError("The display height must be a multiple of 8")
self.transmitter.txJoin()
# Height
self._send((const.HEIGHT|const.SET,res[1]),"Error setting the resolution")
# Width
self._send((const.WIDTH|const.SET,res[0]),"Error setting the resolution")
# Resizes the buffer
buffer = np.empty((res[0],res[1],3),dtype=np.uint8)
buffer[0:len(self.buffer)] = self.buffer
self.buffer = buffer
def setDepth(self,depth):
self.transmitter.txJoin()
self._send((const.DEPTH|const.SET,depth),"Error setting the depth")
def setTotalWidth(self,width):
self._send((const.TOTAL_WIDTH|const.SET,width),"Error setting the total width")
def setSpeed(self,s):
self._send((const.SPEED|const.SET,s),"Error setting the speed")
def setDim(self,s):
self._send((const.DIMM|const.SET,s),"Error setting the dimm")
# Variable getters
def getFPS(self):
return self._send((const.FPS|const.GET,),"Error getting the fps")
def getResolution(self):
# Height
h = self._send((const.HEIGHT|const.GET,),"Error getting the resolution")
# Width
w =self._send((const.WIDTH|const.GET,),"Error getting the resolution")
return (w,h)
def getDepth(self):
return self._send((const.DEPTH|const.GET,),"Error getting the depth")
def getTotalWidth(self):
return self._send((const.TOTAL_WIDTH|const.GET,),"Error getting the total width")
def getSpeed(self):
return self._send((const.SPEED|const.GET,),"Error getting the speed")
def getDim(self):
return self._send((const.DIMM|const.GET,),"Error getting the dimm")
# Pygame data writers
def pgBlit(self,surface):
# Copy the matrix as a numpy array
self.buffer = np.copy(surfarray.pixels3d(surface).flatten())
        # If there isn't already a burst task in the queue, create one
if not self.transmitter.burstInQueue.isSet():
self.transmitter.burstInQueue.set()
self._send_noRcv([const.BURST|const.DATA, self.buffer])
def pgBlitColumn(self,surface,pos):
# Copy the column to a numpy array
self.buffer[pos:pos+1] = np.copy(surfarray.pixels3d(surface).flatten())
        # If there isn't already a burst task in the queue, create a write_column task
if not self.transmitter.burstInQueue.isSet():
self._send_noRcv([const.WRITE_COLUMN|const.DATA, pos, self.buffer[pos:pos+1]])
    def pgBlitSection(self,surface,pos,length):
        # Copy the section to a numpy array
        self.buffer[pos:pos+length] = np.copy(surfarray.pixels3d(surface).flatten())
        # If there isn't already a burst task in the queue, create a write_section task
        if not self.transmitter.burstInQueue.isSet():
            self._send_noRcv([const.WRITE_SECTION|const.DATA, pos, length,
                                self.buffer[pos:pos+length]])
| mit | -7,462,355,119,536,010,000 | 29.165 | 90 | 0.585753 | false | 3.833333 | false | false | false |
ergoregion/Rota-Program | Rota_System/UI/Events/widget_template.py | 1 | 1916 | __author__ = 'Neil Butcher'
from PyQt4 import QtGui, QtCore
import widget_core
from Rota_System.UI.Appointments import AppointmentsListWidget
class EventTemplateWidget(QtGui.QWidget):
commandIssued = QtCore.pyqtSignal(QtGui.QUndoCommand)
criticalCommandIssued = QtCore.pyqtSignal()
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.layout = QtGui.QVBoxLayout(self)
self.core_widget = widget_core.EventTemplateWidget(self)
self.layout.addWidget(self.core_widget)
self.core_widget.commandIssued.connect(self.emitCommand)
self.core_widget.criticalCommandIssued.connect(self.emitCriticalCommand)
self.appointment_widget = AppointmentsListWidget(self)
self.layout.addWidget(self.appointment_widget)
self.appointment_widget.commandIssued.connect(self.emitCommand)
self.appointment_widget.criticalCommandIssued.connect(self.emitCriticalCommand)
@QtCore.pyqtSlot(QtCore.QObject)
def setEvent(self, item):
self.core_widget.setEvent(item)
self.appointment_widget.setEvent(item)
@QtCore.pyqtSlot(QtGui.QUndoCommand)
def emitCommand(self, command):
self.commandIssued.emit(command)
@QtCore.pyqtSlot()
def emitCriticalCommand(self):
self.criticalCommandIssued.emit()
import sys
from Rota_System.Roles import Role, GlobalRoleList
from Rota_System import Events
from Rota_System.UI.model_undo import MasterUndoModel
def main():
GlobalRoleList.add_role(Role('Baker', 'B', 2))
GlobalRoleList.add_role(Role('Steward', 'S', 9))
GlobalRoleList.add_role(Role('Fisherman', 'F', 7))
m = MasterUndoModel()
app = QtGui.QApplication(sys.argv)
w = EventTemplateWidget(None)
e = Events.Event(None)
w.setEvent(e)
m.add_command_contributer(w)
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | mit | 4,932,403,053,297,764,000 | 28.953125 | 87 | 0.709812 | false | 3.502742 | false | false | false |
brettwooldridge/buck | programs/tracing.py | 4 | 4241 | #!/usr/bin/env python
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import glob
import json
import os
import os.path
import platform
import time
import uuid
from timing import monotonic_time_nanos
def create_symlink(original, symlink):
if platform.system() == "Windows":
# Not worth dealing with the convenience symlink on Windows.
return
else:
(symlink_dir, symlink_file) = os.path.split(symlink)
# Avoid race conditions with other processes by:
#
# 1) Creating a symlink /path/to/.symlink_file.UUID -> /path/to/original
# 2) Atomically renaming /path/to/.symlink_file.UUID -> /path/to/symlink_file
#
# If another process races with this one, the most recent one wins, which
# is the behavior we want.
temp_symlink_filename = ".{0}.{1}".format(symlink_file, uuid.uuid4())
temp_symlink_path = os.path.join(symlink_dir, temp_symlink_filename)
os.symlink(original, temp_symlink_path)
os.rename(temp_symlink_path, symlink)
class _TraceEventPhases(object):
BEGIN = "B"
END = "E"
IMMEDIATE = "I"
COUNTER = "C"
ASYNC_START = "S"
ASYNC_FINISH = "F"
OBJECT_SNAPSHOT = "O"
OBJECT_NEW = "N"
OBJECT_DELETE = "D"
METADATA = "M"
class Tracing(object):
_trace_events = [
{
"name": "process_name",
"ph": _TraceEventPhases.METADATA,
"pid": os.getpid(),
"args": {"name": "buck.py"},
}
]
def __init__(self, name, args={}):
self.name = name
self.args = args
self.pid = os.getpid()
def __enter__(self):
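        # The trace event format consumed by Chrome's trace viewer expects
        # timestamps in microseconds.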
now_us = monotonic_time_nanos() / 1000
self._add_trace_event(
"buck-launcher",
self.name,
_TraceEventPhases.BEGIN,
self.pid,
1,
now_us,
self.args,
)
def __exit__(self, x_type, x_value, x_traceback):
now_us = monotonic_time_nanos() / 1000
self._add_trace_event(
"buck-launcher",
self.name,
_TraceEventPhases.END,
self.pid,
1,
now_us,
self.args,
)
@staticmethod
def _add_trace_event(category, name, phase, pid, tid, ts, args):
Tracing._trace_events.append(
{
"cat": category,
"name": name,
"ph": phase,
"pid": pid,
"tid": tid,
"ts": ts,
"args": args,
}
)
@staticmethod
def write_to_dir(buck_log_dir, build_id):
filename_time = time.strftime("%Y-%m-%d.%H-%M-%S")
trace_filename = os.path.join(
buck_log_dir, "launch.{0}.{1}.trace".format(filename_time, build_id)
)
trace_filename_link = os.path.join(buck_log_dir, "launch.trace")
try:
os.makedirs(buck_log_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(trace_filename, "w") as f:
json.dump(Tracing._trace_events, f)
create_symlink(trace_filename, trace_filename_link)
Tracing.clean_up_old_logs(buck_log_dir)
@staticmethod
def clean_up_old_logs(buck_log_dir, logs_to_keep=25):
traces = filter(
os.path.isfile, glob.glob(os.path.join(buck_log_dir, "launch.*.trace"))
)
try:
traces = sorted(traces, key=os.path.getmtime)
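            # traces are sorted oldest-first, so everything before the last
            # logs_to_keep entries is stale and can be removed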
for f in traces[:-logs_to_keep]:
os.remove(f)
except OSError:
return # a concurrent run cleaned up the logs
| apache-2.0 | -5,381,889,734,795,819,000 | 28.866197 | 85 | 0.564725 | false | 3.700698 | false | false | false |
coteyr/home-assistant | homeassistant/components/binary_sensor/template.py | 1 | 3823 | """
homeassistant.components.binary_sensor.template
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for exposing a templated binary_sensor
"""
import logging
from homeassistant.components.binary_sensor import (BinarySensorDevice,
DOMAIN,
SENSOR_CLASSES)
from homeassistant.const import ATTR_FRIENDLY_NAME, CONF_VALUE_TEMPLATE
from homeassistant.core import EVENT_STATE_CHANGED
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers import template
from homeassistant.util import slugify
ENTITY_ID_FORMAT = DOMAIN + '.{}'
CONF_SENSORS = 'sensors'
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup template binary sensors."""
sensors = []
if config.get(CONF_SENSORS) is None:
_LOGGER.error('Missing configuration data for binary_sensor platform')
return False
for device, device_config in config[CONF_SENSORS].items():
if device != slugify(device):
_LOGGER.error('Found invalid key for binary_sensor.template: %s. '
'Use %s instead', device, slugify(device))
continue
if not isinstance(device_config, dict):
_LOGGER.error('Missing configuration data for binary_sensor %s',
device)
continue
friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
sensor_class = device_config.get('sensor_class')
value_template = device_config.get(CONF_VALUE_TEMPLATE)
if sensor_class not in SENSOR_CLASSES:
_LOGGER.error('Sensor class is not valid')
continue
if value_template is None:
_LOGGER.error(
'Missing %s for sensor %s', CONF_VALUE_TEMPLATE, device)
continue
sensors.append(
BinarySensorTemplate(
hass,
device,
friendly_name,
sensor_class,
value_template)
)
if not sensors:
_LOGGER.error('No sensors added')
return False
add_devices(sensors)
return True
class BinarySensorTemplate(BinarySensorDevice):
"""A virtual binary_sensor that triggers from another sensor."""
# pylint: disable=too-many-arguments
def __init__(self, hass, device, friendly_name, sensor_class,
value_template):
self._hass = hass
self._device = device
self._name = friendly_name
self._sensor_class = sensor_class
self._template = value_template
self._state = None
self.entity_id = generate_entity_id(
ENTITY_ID_FORMAT, device,
hass=hass)
_LOGGER.info('Started template sensor %s', device)
hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener)
def _event_listener(self, event):
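        # Any state change on the bus may affect the template, so re-render it.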
self.update_ha_state(True)
@property
def should_poll(self):
return False
@property
def sensor_class(self):
return self._sensor_class
@property
def name(self):
return self._name
@property
def is_on(self):
return self._state
def update(self):
try:
value = template.render(self._hass, self._template)
except TemplateError as ex:
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"):
# Common during HA startup - so just a warning
_LOGGER.warning(ex)
return
_LOGGER.error(ex)
value = 'false'
self._state = value.lower() == 'true'
| mit | -2,837,158,572,584,971,000 | 30.336066 | 78 | 0.589589 | false | 4.54038 | true | false | false |
AdamWill/anaconda | pyanaconda/startup_utils.py | 1 | 17832 | #
# startup_utils.py - code used during early startup with minimal dependencies
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.i18n import _
import logging
log = logging.getLogger("anaconda")
stdout_log = logging.getLogger("anaconda.stdout")
import sys
import time
import imp
import os
from pyanaconda import iutil
from pyanaconda import product
from pyanaconda import constants
from pyanaconda import geoloc
from pyanaconda import anaconda_log
from pyanaconda import network
from pyanaconda import safe_dbus
from pyanaconda import kickstart
from pyanaconda.flags import flags
from pyanaconda.flags import can_touch_runtime_system
from pyanaconda.screensaver import inhibit_screensaver
import blivet
def module_exists(module_path):
"""Report is a given module exists in the current module import pth or not.
Supports checking bot modules ("foo") os submodules ("foo.bar.baz")
:param str module_path: (sub)module identifier
:returns: True if (sub)module exists in path, False if not
:rtype: bool
"""
module_path_components = module_path.split(".")
module_name = module_path_components.pop()
parent_module_path = None
if module_path_components:
# the path specifies a submodule ("bar.foo")
# we need to chain-import all the modules in the submodule path before
# we can check if the submodule itself exists
for name in module_path_components:
module_info = imp.find_module(name, parent_module_path)
module = imp.load_module(name, *module_info)
if module:
parent_module_path = module.__path__
else:
# one of the parents was not found, abort search
return False
# if we got this far we should have either some path or the module is
# not a submodule and the default set of paths will be used (path=None)
try:
# if the module is not found imp raises an ImportError
imp.find_module(module_name, parent_module_path)
return True
except ImportError:
return False
def get_anaconda_version_string():
"""Return a string describing current Anaconda version.
If the current version can't be determined the string
"unknown" will be returned.
:returns: string describing Anaconda version
:rtype: str
"""
# we are importing the version module directly so that we don't drag in any
# non-necessary stuff; we also need to handle the possibility of the
# import itself failing
if module_exists("pyanaconda.version"):
# Ignore pylint not finding the version module, since thanks to automake
# there's a good chance that version.py is not in the same directory as
# the rest of pyanaconda.
from pyanaconda import version # pylint: disable=no-name-in-module
return version.__version__
else:
return "unknown"
def gtk_warning(title, reason):
"""A simple warning dialog for use during early startup of the Anaconda GUI.
:param str title: title of the warning dialog
:param str reason: warning message
TODO: this should be abstracted out to some kind of a "warning API" + UI code
that shows the actual warning
"""
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
dialog = Gtk.MessageDialog(type=Gtk.MessageType.ERROR,
buttons=Gtk.ButtonsType.CLOSE,
message_format=reason)
dialog.set_title(title)
dialog.run()
dialog.destroy()
def check_memory(anaconda, options, display_mode=None):
"""Check is the system has enough RAM for installation.
:param anaconda: instance of the Anaconda class
:param options: command line/boot options
:param display_mode: a display mode to use for the check
(graphical mode usually needs more RAM, etc.)
"""
from pyanaconda import isys
reason_strict = _("%(product_name)s requires %(needed_ram)s MB of memory to "
"install, but you only have %(total_ram)s MB on this machine.\n")
reason_graphical = _("The %(product_name)s graphical installer requires %(needed_ram)s "
"MB of memory, but you only have %(total_ram)s MB\n.")
reboot_extra = _('\n'
'Press [Enter] to reboot your system.\n')
livecd_title = _("Not enough RAM")
livecd_extra = _(" Try the text mode installer by running:\n\n"
"'/usr/bin/liveinst -T'\n\n from a root terminal.")
nolivecd_extra = _(" Starting text mode.")
# skip the memory check in rescue mode
if options.rescue:
return
if not display_mode:
display_mode = anaconda.display_mode
reason = reason_strict
total_ram = int(isys.total_memory() / 1024)
needed_ram = int(isys.MIN_RAM)
graphical_ram = int(isys.MIN_GUI_RAM)
# count the squashfs.img in if it is kept in RAM
if not iutil.persistent_root_image():
needed_ram += isys.SQUASHFS_EXTRA_RAM
graphical_ram += isys.SQUASHFS_EXTRA_RAM
log.info("check_memory(): total:%s, needed:%s, graphical:%s",
total_ram, needed_ram, graphical_ram)
if not options.memcheck:
log.warning("CHECK_MEMORY DISABLED")
return
reason_args = {"product_name": product.productName,
"needed_ram": needed_ram,
"total_ram": total_ram}
if needed_ram > total_ram:
if options.liveinst:
# pylint: disable=logging-not-lazy
stdout_log.warning(reason % reason_args)
gtk_warning(livecd_title, reason % reason_args)
else:
reason += reboot_extra
print(reason % reason_args)
print(_("The installation cannot continue and the system will be rebooted"))
print(_("Press ENTER to continue"))
input()
iutil.ipmi_report(constants.IPMI_ABORTED)
sys.exit(1)
# override display mode if machine cannot nicely run X
if display_mode != constants.DisplayModes.TUI and not flags.usevnc:
needed_ram = graphical_ram
reason_args["needed_ram"] = graphical_ram
reason = reason_graphical
if needed_ram > total_ram:
if options.liveinst:
reason += livecd_extra
# pylint: disable=logging-not-lazy
stdout_log.warning(reason % reason_args)
title = livecd_title
gtk_warning(title, reason % reason_args)
iutil.ipmi_report(constants.IPMI_ABORTED)
sys.exit(1)
else:
reason += nolivecd_extra
# pylint: disable=logging-not-lazy
stdout_log.warning(reason % reason_args)
anaconda.display_mode = constants.DisplayModes.TUI
time.sleep(2)
def start_geolocation(provider_id=constants.GEOLOC_DEFAULT_PROVIDER):
"""Start an asynchronous geolocation attempt.
The data from geolocation is used to pre-select installation language and timezone.
:param str provider_id: geolocation provider id
"""
# check if the provider id is valid
parsed_id = geoloc.get_provider_id_from_option(provider_id)
if parsed_id is None:
log.error('geoloc: wrong provider id specified: %s', provider_id)
else:
provider_id = parsed_id
# instantiate the geolocation module and start location data refresh
geoloc.init_geolocation(provider_id=provider_id)
geoloc.refresh()
def setup_logging_from_options(options):
"""Configure logging according to Anaconda command line/boot options.
:param options: Anaconda command line/boot options
"""
if (options.debug or options.updateSrc) and not options.loglevel:
        # debugging means debug logging if an explicit level hasn't been set
options.loglevel = "debug"
if options.loglevel and options.loglevel in anaconda_log.logLevelMap:
log.info("Switching logging level to %s", options.loglevel)
level = anaconda_log.logLevelMap[options.loglevel]
anaconda_log.logger.loglevel = level
anaconda_log.setHandlersLevel(log, level)
storage_log = logging.getLogger("storage")
anaconda_log.setHandlersLevel(storage_log, level)
packaging_log = logging.getLogger("packaging")
anaconda_log.setHandlersLevel(packaging_log, level)
if can_touch_runtime_system("syslog setup"):
if options.syslog:
anaconda_log.logger.updateRemote(options.syslog)
if options.remotelog:
try:
host, port = options.remotelog.split(":", 1)
port = int(port)
anaconda_log.logger.setup_remotelog(host, port)
except ValueError:
log.error("Could not setup remotelog with %s", options.remotelog)
def prompt_for_ssh():
"""Prompt the user to ssh to the installation environment on the s390."""
# Do some work here to get the ip addr / hostname to pass
# to the user.
import socket
ip = network.getFirstRealIP()
if not ip:
stdout_log.error("No IP addresses found, cannot continue installation.")
iutil.ipmi_report(constants.IPMI_ABORTED)
sys.exit(1)
ipstr = ip
try:
hinfo = socket.gethostbyaddr(ipstr)
except socket.herror as e:
stdout_log.debug("Exception caught trying to get host name of %s: %s", ipstr, e)
name = network.getHostname()
else:
if len(hinfo) == 3:
name = hinfo[0]
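    # Wrap IPv6 addresses in brackets so the ssh hint below reads install@[address].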
if ip.find(':') != -1:
ipstr = "[%s]" % (ip,)
if (name is not None) and (not name.startswith('localhost')) and (ipstr is not None):
connxinfo = "%s (%s)" % (socket.getfqdn(name=name), ipstr,)
elif ipstr is not None:
connxinfo = "%s" % (ipstr,)
else:
connxinfo = None
if connxinfo:
stdout_log.info(_("Please ssh install@%s to begin the install."), connxinfo)
else:
stdout_log.info(_("Please ssh install@HOSTNAME to continue installation."))
def clean_pstore():
"""Remove files stored in nonvolatile ram created by the pstore subsystem.
Files in pstore are Linux (not distribution) specific, but we want to
make sure the entirety of them are removed so as to ensure that there
is sufficient free space on the flash part. On some machines this will
take effect immediately, which is the best case. Unfortunately on some,
an intervening reboot is needed.
"""
iutil.dir_tree_map("/sys/fs/pstore", os.unlink, files=True, dirs=False)
def print_startup_note(options):
"""Print Anaconda version and short usage instructions.
    Print Anaconda version and short usage instructions to the TTY where Anaconda is running.
:param options: command line/boot options
"""
verdesc = "%s for %s %s" % (get_anaconda_version_string(),
product.productName, product.productVersion)
logs_note = " * installation log files are stored in /tmp during the installation"
    shell_and_tmux_note = " * shell is available on TTY2 and in second TMUX pane (ctrl+b, then press 2)"
    shell_only_note = " * shell is available on TTY2"
tmux_only_note = " * shell is available in second TMUX pane (ctrl+b, then press 2)"
text_mode_note = " * if the graphical installation interface fails to start, try again with the\n"\
" inst.text bootoption to start text installation"
separate_attachements_note = " * when reporting a bug add logs from /tmp as separate text/plain attachments"
if product.isFinal:
print("anaconda %s started." % verdesc)
else:
print("anaconda %s (pre-release) started." % verdesc)
if not options.images and not options.dirinstall:
print(logs_note)
# no fancy stuff like TTYs on a s390...
if not blivet.arch.is_s390():
if "TMUX" in os.environ and os.environ.get("TERM") == "screen":
print(shell_and_tmux_note)
else:
print(shell_only_note) # TMUX is not running
# ...but there is apparently TMUX during the manual installation on s390!
elif not options.ksfile:
print(tmux_only_note) # but not during kickstart installation
# no need to tell users how to switch to text mode
# if already in text mode
if options.display_mode == constants.DisplayModes.TUI:
print(text_mode_note)
print(separate_attachements_note)
def live_startup(anaconda, options):
"""Live environment startup tasks.
:param anaconda: instance of the Anaconda class
:param options: command line/boot options
"""
flags.livecdInstall = True
try:
anaconda.dbus_session_connection = safe_dbus.get_new_session_connection()
except safe_dbus.DBusCallError as e:
log.info("Unable to connect to DBus session bus: %s", e)
else:
anaconda.dbus_inhibit_id = inhibit_screensaver(anaconda.dbus_session_connection)
def set_installation_method_from_anaconda_options(anaconda, ksdata):
"""Set the installation method from Anaconda options.
This basically means to set the installation method from options provided
to Anaconda via command line/boot options.
:param anaconda: instance of the Anaconda class
:param ksdata: data model corresponding to the installation kickstart
"""
if anaconda.methodstr.startswith("cdrom"):
ksdata.method.method = "cdrom"
elif anaconda.methodstr.startswith("nfs"):
ksdata.method.method = "nfs"
nfs_options, server, path = iutil.parseNfsUrl(anaconda.methodstr)
ksdata.method.server = server
ksdata.method.dir = path
ksdata.method.opts = nfs_options
elif anaconda.methodstr.startswith("hd:"):
ksdata.method.method = "harddrive"
url = anaconda.methodstr.split(":", 1)[1]
url_parts = url.split(":")
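        # The URL may be hd:<device>:<path> or hd:<device>:<something>:<path>;
        # only the device and the trailing path component are used.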
device = url_parts[0]
path = ""
if len(url_parts) == 2:
path = url_parts[1]
elif len(url_parts) == 3:
path = url_parts[2]
ksdata.method.partition = device
ksdata.method.dir = path
elif anaconda.methodstr.startswith("http") or anaconda.methodstr.startswith("ftp") or anaconda.methodstr.startswith("file"):
ksdata.method.method = "url"
ksdata.method.url = anaconda.methodstr
# installation source specified by bootoption
# overrides source set from kickstart;
# the kickstart might have specified a mirror list,
# so we need to clear it here if plain url source is provided
# by a bootoption, because having both url & mirror list
# set at once is not supported and breaks dnf in
# unpredictable ways
# FIXME: Is this still needed for dnf?
ksdata.method.mirrorlist = None
elif anaconda.methodstr.startswith("livecd"):
ksdata.method.method = "harddrive"
device = anaconda.methodstr.split(":", 1)[1]
ksdata.method.partition = os.path.normpath(device)
else:
log.error("Unknown method: %s", anaconda.methodstr)
def parse_kickstart(options, addon_paths):
"""Parse the input kickstart.
If we were given a kickstart file, parse (but do not execute) that now.
Otherwise, load in defaults from kickstart files shipped with the
installation media. Pick up any changes from interactive-defaults.ks
that would otherwise be covered by the dracut KS parser.
:param options: command line/boot options
:param dict addon_paths: addon paths dictionary
:returns: kickstart parsed to a data model
"""
ksdata = None
if options.ksfile and not options.liveinst:
if not os.path.exists(options.ksfile):
stdout_log.error("Kickstart file %s is missing.", options.ksfile)
iutil.ipmi_report(constants.IPMI_ABORTED)
sys.exit(1)
flags.automatedInstall = True
flags.eject = False
ks_files = [options.ksfile]
elif os.path.exists("/run/install/ks.cfg") and not options.liveinst:
# this is to handle such cases where a user has pre-loaded a
# ks.cfg onto an OEMDRV labeled device
flags.automatedInstall = True
flags.eject = False
ks_files = ["/run/install/ks.cfg"]
else:
ks_files = ["/tmp/updates/interactive-defaults.ks",
"/usr/share/anaconda/interactive-defaults.ks"]
for ks in ks_files:
if not os.path.exists(ks):
continue
kickstart.preScriptPass(ks)
log.info("Parsing kickstart: " + ks)
ksdata = kickstart.parseKickstart(ks)
# Only load the first defaults file we find.
break
if not ksdata:
ksdata = kickstart.AnacondaKSHandler(addon_paths["ks"])
return ksdata
| gpl-2.0 | 4,923,131,784,954,620,000 | 38.191209 | 128 | 0.656068 | false | 3.978581 | false | false | false |
azumimuo/family-xbmc-addon | script.module.urlresolver/lib/urlresolver/plugins/mooshare_biz.py | 2 | 2529 | '''
    Mooshare urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import xbmc
class MooShareResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "mooshare"
domains = [ "mooshare.biz" ]
pattern = '(?://|\.)(mooshare\.biz)/(?:embed-|iframe/)?([0-9a-zA-Z]+)'
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
data = {}
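        # Collect the hidden form fields and post them back together with the
        # "Proceed to video" values to get past the site's countdown page.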
        if '<form role="search"' in html and '<Form method="POST" action=\'\'>' in html:
            html = html.split('<Form method="POST" action=\'\'>')[1]
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
data[name] = value
        data[u'referer'] = ''
        data[u'usr_login'] = ''
        data[u'imhuman'] = 'Proceed to video'
        data[u'btn_download'] = 'Proceed to video'
xbmc.sleep(5000)
html = self.net.http_POST(url, data).content
r = re.search('file\s*:\s*"(.+?)"', html)
if r:
return r.group(1)
else:
raise UrlResolver.ResolverError('could not find video')
def get_url(self, host, media_id):
return 'http://mooshare.biz/%s' % media_id
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
return re.search(self.pattern, url) or self.name in host
| gpl-2.0 | -2,058,027,579,635,291,400 | 36.746269 | 147 | 0.624753 | false | 3.724595 | false | false | false |
shibu/pyspec | pyspec/wxui/util.py | 1 | 1803 | # -*- coding: ascii -*-
"""Idea of event binder decoretor is Mr.NoboNobo's and TurboGears. Thanks.
Mr.NoboNobo's site: http://python.matrix.jp
"""
__pyspec = 1
__all__ = ('expose',
'bind_event_handler',
'get_resource_path',
'load_png')
import pyspec.util
attr_key = "__pyspec_wxutil_eventhandler"
class binder_class(object):
def __init__(self, event, id):
self.event = event
self.id = id
def __call__(self, method):
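        # Record the (event, id) pair on the handler function itself so that
        # bind_event_handler() can discover and bind it later.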
from pyspec.util import Struct
event_info = Struct(event=self.event, id=self.id)
if hasattr(method, attr_key):
getattr(method, attr_key).append(event_info)
else:
setattr(method, attr_key, [event_info])
return method
def expose(event, id=None):
return binder_class(event, id)
def bind_event_handler(frame, controller=None):
import wx
from wx.xrc import XRCID
if controller is None:
controller = frame
for name in dir(controller):
obj = getattr(controller, name)
if hasattr(obj, attr_key):
for event_info in getattr(obj, attr_key):
if event_info.id is None:
frame.Bind(event_info.event, obj)
else:
frame.Bind(event_info.event, obj, id=XRCID(event_info.id))
def get_resource_path(filename):
import os
if os.path.exists("resource"):
return os.path.join("resource", filename)
path_in_lib = pyspec.util.pyspec_file_path("resource", filename)
if os.path.exists(path_in_lib):
return path_in_lib
return os.path.abspath(os.path.join(path_in_lib, "..", "..", "..", "resource", filename))
def load_png(filename):
import wx
return wx.Image(filename, wx.BITMAP_TYPE_PNG).ConvertToBitmap()
| mit | -4,515,592,377,775,067,000 | 26.738462 | 93 | 0.601775 | false | 3.401887 | false | false | false |